From 531bb344fe436963adba22b7e62fcb43d8b7f095 Mon Sep 17 00:00:00 2001
From: Neeraj Gupta <254676+ua741@users.noreply.github.com>
Date: Fri, 1 Mar 2024 13:37:01 +0530
Subject: [PATCH] Import museum

---
 server/.air.toml | 32 +
 server/.dockerignore | 1 +
 .../.github/ISSUE_TEMPLATE/feature_request.md | 14 +
 server/.github/pull_request_template.md | 3 +
 server/.github/workflows/dev-ci.yml | 28 +
 server/.github/workflows/pr.yml | 21 +
 server/.github/workflows/prod-ci.yml | 28 +
 server/.gitignore | 10 +
 server/Dockerfile | 26 +
 server/LICENSE | 661 ++++++++++
 server/README.md | 121 ++
 server/RUNNING.md | 185 +++
 server/SECURITY.md | 44 +
 server/cmd/museum/main.go | 962 +++++++++++++++
 server/compose.yaml | 93 ++
 server/configurations/local.yaml | 264 ++++
 server/configurations/production.yaml | 6 +
 server/ente/access.go | 38 +
 server/ente/admin.go | 99 ++
 server/ente/app.go | 28 +
 server/ente/authenticator/authenticator.go | 47 +
 server/ente/billing.go | 188 +++
 server/ente/cache/user_data_cache.go | 56 +
 server/ente/cast/entity.go | 19 +
 server/ente/collection.go | 147 +++
 server/ente/data_cleanup/entity.go | 28 +
 server/ente/details/userdetails.go | 19 +
 server/ente/email.go | 36 +
 server/ente/embedding.go | 37 +
 server/ente/errors.go | 253 ++++
 server/ente/family.go | 71 ++
 server/ente/file.go | 213 ++++
 server/ente/jwt/jwt.go | 53 +
 server/ente/kex.go | 6 +
 server/ente/locationtag.go | 59 +
 server/ente/offer.go | 13 +
 server/ente/passkey.go | 14 +
 server/ente/passkeyCredential.go | 94 ++
 server/ente/public_collection.go | 148 +++
 server/ente/push.go | 34 +
 server/ente/remotestore.go | 15 +
 server/ente/srp.go | 100 ++
 server/ente/storagebonus/errors.go | 39 +
 server/ente/storagebonus/referral.go | 54 +
 server/ente/storagebonus/referral_type.go | 46 +
 server/ente/storagebonus/storge_bonus.go | 129 ++
 server/ente/trash.go | 47 +
 server/ente/user.go | 213 ++++
 server/ente/userentity/entity.go | 66 +
 server/ente/webauthnSession.go | 63 +
 server/go.mod | 123 ++
 server/go.sum | 1097 +++++++++++++++++
 server/mail-templates/account_deleted.html | 296 +++++
 .../account_deleted_active_sub.html | 296 +++++
 server/mail-templates/email_changed.html | 235 ++++
 server/mail-templates/family_accepted.html | 301 +++++
 server/mail-templates/family_invited.html | 385 ++++++
 server/mail-templates/family_left.html | 302 +++++
 server/mail-templates/family_removed.html | 300 +++++
 server/mail-templates/files_collected.html | 299 +++++
 .../mobile_app_first_upload.html | 347 ++++++
 server/mail-templates/on_hold.html | 285 +++++
 server/mail-templates/ott_auth.html | 215 ++++
 server/mail-templates/ott_change_email.html | 215 ++++
 server/mail-templates/ott_photos.html | 215 ++++
 server/mail-templates/report_alert.html | 359 ++++++
 .../report_limit_exceeded_alert.html | 353 ++++++
 .../storage_limit_exceeded.html | 253 ++++
 server/mail-templates/subscription_ended.html | 264 ++++
 .../mail-templates/subscription_upgraded.html | 322 +++++
 .../mail-templates/successful_referral.html | 298 +++++
 .../mail-templates/web_app_first_upload.html | 463 +++++++
 .../10_update_temp_object_keys.down.sql | 6 +
 .../10_update_temp_object_keys.up.sql | 13 +
 .../11_remove_kek_hash_constraint.down.sql | 1 +
 .../11_remove_kek_hash_constraint.up.sql | 1 +
 server/migrations/12_add_hash_limits.down.sql | 3 +
 server/migrations/12_add_hash_limits.up.sql | 7 +
 .../migrations/13_add_recovery_key.down.sql | 5 +
 server/migrations/13_add_recovery_key.up.sql | 5 +
 server/migrations/14_add_user_agent.down.sql | 3 +
 server/migrations/14_add_user_agent.up.sql | 3 +
 .../15_update_subscriptions.down.sql | 5 +
 .../migrations/15_update_subscriptions.up.sql | 13 +
 .../16_remove_deleted_file_objects.down.sql | 1 +
 .../16_remove_deleted_file_objects.up.sql | 11 +
 ...17_add_notification_history_table.down.sql | 1 +
 .../17_add_notification_history_table.up.sql | 10 +
 .../migrations/18_update_size_column.down.sql | 5 +
 .../migrations/18_update_size_column.up.sql | 5 +
 .../19_add_encrypted_email_columns.down.sql | 14 +
 .../19_add_encrypted_email_columns.up.sql | 15 +
 server/migrations/1_create_tables.down.sql | 31 +
 server/migrations/1_create_tables.up.sql | 155 +++
 server/migrations/20_recompute_usage.down.sql | 1 +
 server/migrations/20_recompute_usage.up.sql | 19 +
 server/migrations/21_add_two_factor.down.sql | 8 +
 server/migrations/21_add_two_factor.up.sql | 43 +
 .../22_add_location_tag_table.down.sql | 2 +
 .../22_add_location_tag_table.up.sql | 38 +
 .../23_update_mem_limit_column.down.sql | 2 +
 .../23_update_mem_limit_column.up.sql | 2 +
 server/migrations/24_bump_ids.down.sql | 1 +
 server/migrations/24_bump_ids.up.sql | 3 +
 .../25_alter_ids_to_bigint.down.sql | 1 +
 .../migrations/25_alter_ids_to_bigint.up.sql | 46 +
 .../26_update_ott_and_twofactor.down.sql | 6 +
 .../26_update_ott_and_twofactor.up.sql | 19 +
 .../27_consolidate_files_table.down.sql | 3 +
 .../27_consolidate_files_table.up.sql | 34 +
 .../migrations/28_queue_time_stamp.down.sql | 8 +
 server/migrations/28_queue_time_stamp.up.sql | 14 +
 .../29_drop_unencrypted_email.down.sql | 2 +
 .../29_drop_unencrypted_email.up.sql | 2 +
 .../2_create_subscriptions.down.sql | 5 +
 .../migrations/2_create_subscriptions.up.sql | 29 +
 .../migrations/30_add_magic_metadata.up.sql | 2 +
 .../30_drop_magic_metadata.down.sql | 2 +
 .../31_add_pub_magic_metadata.up.sql | 2 +
 .../31_drop_pub_magic_metadata.down.sql | 2 +
 server/migrations/32_add_trash_table.down.sql | 4 +
 server/migrations/32_add_trash_table.up.sql | 30 +
 ...3_change_update_at_column_trigger.down.sql | 9 +
 .../33_change_update_at_column_trigger.up.sql | 16 +
 .../34_trash_delete_by_idx.down.sql | 1 +
 .../migrations/34_trash_delete_by_idx.up.sql | 2 +
 .../35_add_push_tokens_table.down.sql | 3 +
 .../35_add_push_tokens_table.up.sql | 23 +
 .../36_update_key_attr_and_tokens.down.sql | 7 +
 .../36_update_key_attr_and_tokens.up.sql | 25 +
 .../migrations/37_public_collection.down.sql | 13 +
 server/migrations/37_public_collection.up.sql | 78 ++
 .../38_add_abuse_report_detail_json.down.sql | 3 +
 .../38_add_abuse_report_detail_json.up.sql | 3 +
 ...op_file_and_thumbnail_object_keys.down.sql | 1 +
 ...drop_file_and_thumbnail_object_keys.up.sql | 3 +
 .../3_add_encypted_collection_name.down.sql | 4 +
 .../3_add_encypted_collection_name.up.sql | 4 +
 .../40_advance_url_settings.down.sql | 13 +
 .../migrations/40_advance_url_settings.up.sql | 21 +
 server/migrations/41_add_file_info.down.sql | 2 +
 server/migrations/41_add_file_info.up.sql | 2 +
 .../42_collection_add_magic_metadata.up.sql | 2 +
 ...42_collection_drop_magic_metadata.down.sql | 2 +
 server/migrations/43_family_plan.down.sql | 7 +
 server/migrations/43_family_plan.up.sql | 44 +
 server/migrations/44_trash_add_index.down.sql | 1 +
 server/migrations/44_trash_add_index.up.sql | 1 +
 .../45_collection_files_file_id_idx.down.sql | 1 +
 .../45_collection_files_file_id_idx.up.sql | 1 +
 server/migrations/46_replication_v2.down.sql | 2 +
 server/migrations/46_replication_v2.up.sql | 1 +
 .../migrations/47_wasabi_integration.down.sql | 2 +
 .../migrations/47_wasabi_integration.up.sql | 1 +
 .../migrations/48_add_stripe_account.down.sql | 4 +
 .../migrations/48_add_stripe_account.up.sql | 10 +
 .../49_add_unique_idx_txn_id.down.sql | 1 +
 .../49_add_unique_idx_txn_id.up.sql | 3 +
 .../4_update_subscriptions_schema.down.sql | 6 +
 .../4_update_subscriptions_schema.up.sql | 6 +
 server/migrations/50_remote_store.down.sql | 2 +
 server/migrations/50_remote_store.up.sql | 22 +
 server/migrations/51_th_time_human.down.sql | 1 +
 server/migrations/51_th_time_human.up.sql | 9 +
 server/migrations/52_authenticator.down.sql | 6 +
 server/migrations/52_authenticator.up.sql | 57 +
 server/migrations/53_add_apps.down.sql | 5 +
 server/migrations/53_add_apps.up.sql | 5 +
 .../54_update_public_tokens.down.sql | 1 +
 .../migrations/54_update_public_tokens.up.sql | 1 +
 server/migrations/55_extend_subs.down.sql | 1 +
 server/migrations/55_extend_subs.up.sql | 6 +
 ...add_uncategorized_type_constraint.down.sql | 1 +
 ...6_add_uncategorized_type_constraint.up.sql | 3 +
 server/migrations/57_object_copies.down.sql | 1 +
 server/migrations/57_object_copies.up.sql | 26 +
 .../58_update_ott_unique_constraint.down.sql | 8 +
 .../58_update_ott_unique_constraint.up.sql | 8 +
 server/migrations/59_delete_data.down.sql | 2 +
 server/migrations/59_delete_data.up.sql | 23 +
 ..._rename_storage_in_mbs_to_storage.down.sql | 1 +
 .../5_rename_storage_in_mbs_to_storage.up.sql | 1 +
 server/migrations/60_add_columns_cf.down.sql | 8 +
 server/migrations/60_add_columns_cf.up.sql | 12 +
 server/migrations/61_storage_bonus.down.sql | 3 +
 server/migrations/61_storage_bonus.up.sql | 46 +
 server/migrations/62_entity_store.down.sql | 6 +
 server/migrations/62_entity_store.up.sql | 64 +
 server/migrations/63_add_kex_store.down.sql | 1 +
 server/migrations/63_add_kex_store.up.sql | 7 +
 ...ollection_drop_pub_magic_metadata.down.sql | 2 +
 .../64_collection_pub_magic_metadata.up.sql | 2 +
 ...ed_collection_drop_magic_metadata.down.sql | 2 +
 ...65_shared_collection_magic_metadata.up.sql | 2 +
 .../migrations/66_add_srp_attributes.down.sql | 3 +
 .../migrations/66_add_srp_attributes.up.sql | 41 +
 .../migrations/67_add_email_mfa_column.up.sql | 10 +
 .../migrations/67_email_mfa_column.down.sql | 2 +
 .../migrations/68_add_locker_changes.down.sql | 20 +
 .../migrations/68_add_locker_changes.up.sql | 22 +
 .../69_add_srp_updated_at_trigger.down.sql | 3 +
 .../69_add_srp_updated_at_trigger.up.sql | 13 +
 server/migrations/6_create_usage.down.sql | 4 +
 server/migrations/6_create_usage.up.sql | 29 +
 server/migrations/70_add_embeddings.down.sql | 4 +
 server/migrations/70_add_embeddings.up.sql | 25 +
 .../migrations/71_user_source_attr.down.sql | 6 +
 server/migrations/71_user_source_attr.up.sql | 5 +
 server/migrations/72_add_on_storage.down.sql | 6 +
 server/migrations/72_add_on_storage.up.sql | 6 +
 .../73_remove_kex_user_id_fk.down.sql | 5 +
 .../73_remove_kex_user_id_fk.up.sql | 3 +
 server/migrations/74_add_onnx.down.sql | 2 +
 server/migrations/74_add_onnx.up.sql | 1 +
 server/migrations/75_add_passkeys.down.sql | 5 +
 server/migrations/75_add_passkeys.up.sql | 61 +
 .../76_add_passkey_login_sessions.down.sql | 1 +
 .../76_add_passkey_login_sessions.up.sql | 10 +
 .../77_embeddings_table_update.down.sql | 3 +
 .../77_embeddings_table_update.up.sql | 3 +
 server/migrations/78_cast.down.sql | 2 +
 server/migrations/78_cast.up.sql | 16 +
 server/migrations/79_queue_index.down.sql | 1 +
 server/migrations/79_queue_index.up.sql | 3 +
 .../7_add_unique_usage_user_id.down.sql | 1 +
 .../7_add_unique_usage_user_id.up.sql | 1 +
 ...pdate_otts_schema_link_with_email.down.sql | 11 +
 ..._update_otts_schema_link_with_email.up.sql | 11 +
 .../migrations/9_create_datacenter.down.sql | 11 +
 server/migrations/9_create_datacenter.up.sql | 26 +
 server/pkg/api/admin.go | 429 +++++++
 server/pkg/api/authenticator.go | 111 ++
 server/pkg/api/billing.go | 282 +++++
 server/pkg/api/cast.go | 144 +++
 server/pkg/api/collection.go | 446 +++++++
 server/pkg/api/embedding.go | 61 +
 server/pkg/api/family.go | 132 ++
 server/pkg/api/file.go | 372 ++++++
 server/pkg/api/healthcheck.go | 81 ++
 server/pkg/api/kex.go | 53 +
 server/pkg/api/locationtag.go | 88 ++
 server/pkg/api/offer.go | 21 +
 server/pkg/api/passkeys.go | 89 ++
 server/pkg/api/public_collection.go | 169 +++
 server/pkg/api/push.go | 32 +
 server/pkg/api/remotestore.go | 51 +
 server/pkg/api/storage_bonus.go | 49 +
 server/pkg/api/trash.go | 86 ++
 server/pkg/api/user.go | 591 +++++++++
 server/pkg/api/userentity.go | 117 ++
 server/pkg/controller/access/access.go | 34 +
 server/pkg/controller/access/collection.go | 59 +
 server/pkg/controller/access/file.go | 28 +
 server/pkg/controller/appstore.go | 194 +++
 .../controller/authenticator/controller.go | 64 +
 server/pkg/controller/billing.go | 497 ++++++++
 server/pkg/controller/cast/controller.go | 61 +
 server/pkg/controller/collection.go | 781 ++++++++++++
 server/pkg/controller/collection_cast.go | 50 +
 .../controller/commonbilling/controller.go | 61 +
 .../pkg/controller/data_cleanup/controller.go | 201 +++
 server/pkg/controller/discord/discord.go | 122 ++
 .../controller/email/email_notification.go | 158 +++
 server/pkg/controller/embedding/controller.go | 273 ++++
 server/pkg/controller/family/admin.go | 251 ++++
 server/pkg/controller/family/family.go | 141 +++
 server/pkg/controller/family/member.go | 122 ++
 server/pkg/controller/file.go | 878 +++++++++++++
 server/pkg/controller/kex/kex.go | 21 +
 .../pkg/controller/locationtag/controller.go | 31 +
 server/pkg/controller/lock/lock.go | 58 +
 server/pkg/controller/mailing_lists.go | 160 +++
 server/pkg/controller/object.go | 126 ++
 server/pkg/controller/object_cleanup.go | 684 ++++++++++
 server/pkg/controller/offer/offer.go | 116 ++
 server/pkg/controller/passkeys.go | 84 ++
 server/pkg/controller/playstore.go | 239 ++++
 server/pkg/controller/public_collection.go | 338 +++++
 server/pkg/controller/push.go | 188 +++
 .../pkg/controller/remotestore/controller.go | 36 +
 server/pkg/controller/replication3.go | 625 ++++++++++
 server/pkg/controller/storagebonus/bonus.go | 59 +
 .../pkg/controller/storagebonus/referral.go | 160 +++
 .../controller/storagebonus/referral_cron.go | 58 +
 server/pkg/controller/stripe.go | 705 +++++++++++
 server/pkg/controller/trash.go | 326 +++++
 server/pkg/controller/trash_file_metadata.go | 94 ++
 server/pkg/controller/usage.go | 107 ++
 server/pkg/controller/user/jwt.go | 50 +
 server/pkg/controller/user/srp.go | 229 ++++
 server/pkg/controller/user/twofactor.go | 200 +++
 server/pkg/controller/user/user.go | 411 ++++++
 server/pkg/controller/user/user_delete.go | 91 ++
 server/pkg/controller/user/user_details.go | 170 +++
 server/pkg/controller/user/userauth.go | 405 ++++++
 server/pkg/controller/usercache/controller.go | 40 +
 server/pkg/controller/usercache/count.go | 27 +
 .../pkg/controller/userentity/controller.go | 64 +
 server/pkg/external/wasabi/compliance.go | 795 ++++++++++++
 server/pkg/external/zoho/api.go | 155 +++
 server/pkg/middleware/access_token.go | 187 +++
 server/pkg/middleware/auth.go | 88 ++
 server/pkg/middleware/cast_auth.go | 53 +
 server/pkg/middleware/rate_limit.go | 121 ++
 server/pkg/middleware/recover.go | 88 ++
 server/pkg/middleware/request_logger.go | 108 ++
 server/pkg/repo/authenticator/entity.go | 116 ++
 server/pkg/repo/authenticator/key.go | 41 +
 server/pkg/repo/authenticator/repository.go | 11 +
 server/pkg/repo/billing.go | 126 ++
 server/pkg/repo/cast/repo.go | 117 ++
 server/pkg/repo/collection.go | 1046 ++++++++++++++++
 server/pkg/repo/datacleanup/repository.go | 133 ++
 server/pkg/repo/embedding/repository.go | 98 ++
 server/pkg/repo/family.go | 235 ++++
 server/pkg/repo/file.go | 775 ++++++++++++
 server/pkg/repo/file_size.go | 83 ++
 server/pkg/repo/kex/repository.go | 80 ++
 server/pkg/repo/locationtag/repository.go | 89 ++
 server/pkg/repo/notificationhistory.go | 31 +
 server/pkg/repo/object.go | 204 +++
 server/pkg/repo/object_cleanup.go | 127 ++
 server/pkg/repo/object_copies.go | 182 +++
 server/pkg/repo/passkey/credential.go | 76 ++
 server/pkg/repo/passkey/passkey.go | 483 ++++++++
 server/pkg/repo/passkey/session.go | 36 +
 server/pkg/repo/public_collection.go | 192 +++
 server/pkg/repo/push.go | 62 +
 server/pkg/repo/queue.go | 135 ++
 server/pkg/repo/remotestore/repository.go | 42 +
 server/pkg/repo/srp.go | 192 +++
 server/pkg/repo/storagebonus/bf_addon.go | 34 +
 server/pkg/repo/storagebonus/bonus.go | 102 ++
 server/pkg/repo/storagebonus/bonus_test.go | 25 +
 .../pkg/repo/storagebonus/referral_codes.go | 55 +
 .../repo/storagebonus/referral_codes_test.go | 75 ++
 .../repo/storagebonus/referral_tracking.go | 172 +++
 server/pkg/repo/storagebonus/repo.go | 18 +
 server/pkg/repo/storagebonus/repo_test.go | 67 +
 server/pkg/repo/tasklock.go | 87 ++
 server/pkg/repo/trash.go | 443 +++++++
 server/pkg/repo/twofactor.go | 126 ++
 server/pkg/repo/usage.go | 66 +
 server/pkg/repo/user.go | 398 ++++++
 server/pkg/repo/userauth.go | 174 +++
 server/pkg/repo/userentity/data.go | 119 ++
 server/pkg/repo/userentity/key.go | 30 +
 server/pkg/repo/userentity/repository.go | 11 +
 server/pkg/utils/array/array.go | 49 +
 server/pkg/utils/auth/auth.go | 137 ++
 server/pkg/utils/billing/billing.go | 156 +++
 server/pkg/utils/byteMarshaller/b64.go | 30 +
 server/pkg/utils/byteMarshaller/byte.go | 28 +
 server/pkg/utils/config/config.go | 167 +++
 server/pkg/utils/crypto/crypto.go | 57 +
 server/pkg/utils/email/email.go | 99 ++
 server/pkg/utils/file/file.go | 40 +
 server/pkg/utils/handler/handler.go | 106 ++
 server/pkg/utils/network/network.go | 36 +
 server/pkg/utils/random/generate.go | 15 +
 server/pkg/utils/recover/wrap.go | 30 +
 server/pkg/utils/recover/wrap_test.go | 52 +
 server/pkg/utils/s3config/s3config.go | 207 ++++
 server/pkg/utils/string/string.go | 10 +
 server/pkg/utils/time/time.go | 97 ++
 server/scripts/compose/credentials.yaml | 28 +
 server/scripts/compose/minio-provision.sh | 16 +
 server/scripts/images/museum.png | Bin 0 -> 104994 bytes
 server/scripts/lint.sh | 12 +
 server/scripts/museum.service | 23 +
 .../README.md | 19 +
 .../main.go | 89 ++
 server/tools/gen-random-keys/README.md | 9 +
 server/tools/gen-random-keys/main.go | 39 +
 server/tools/pkg/cli/aws.go | 92 ++
 server/tools/test-wasabi-compliance/README.md | 30 +
 server/tools/test-wasabi-compliance/main.go | 305 +++
 377 files changed, 37454 insertions(+)
 create mode 100644 server/.air.toml
 create mode 100644 server/.dockerignore
 create mode 100644 server/.github/ISSUE_TEMPLATE/feature_request.md
 create mode 100644 server/.github/pull_request_template.md
 create mode 100644 server/.github/workflows/dev-ci.yml
 create mode 100644 server/.github/workflows/pr.yml
 create mode 100644 server/.github/workflows/prod-ci.yml
 create mode 100644 server/.gitignore
 create mode 100644 server/Dockerfile
 create mode 100644 server/LICENSE
 create mode 100644 server/README.md
 create mode 100644 server/RUNNING.md
 create mode 100644 server/SECURITY.md
 create mode 100644 server/cmd/museum/main.go
 create mode 100644 server/compose.yaml
 create mode 100644 server/configurations/local.yaml
 create mode 100644 server/configurations/production.yaml
 create mode 100644 server/ente/access.go
 create mode 100644 server/ente/admin.go
 create mode 100644 server/ente/app.go
 create mode 100644 server/ente/authenticator/authenticator.go
 create mode 100644 server/ente/billing.go
 create mode 100644 server/ente/cache/user_data_cache.go
 create mode 100644 server/ente/cast/entity.go
 create mode 100644 server/ente/collection.go
 create mode 100644 server/ente/data_cleanup/entity.go
 create mode 100644 server/ente/details/userdetails.go
 create mode 100644 server/ente/email.go
 create mode 100644 server/ente/embedding.go
 create mode 100644 server/ente/errors.go
 create mode 100644 server/ente/family.go
 create mode 100644 server/ente/file.go
 create mode 100644 server/ente/jwt/jwt.go
 create mode 100644 server/ente/kex.go
 create mode 100644 server/ente/locationtag.go
 create mode 100644 server/ente/offer.go
 create mode 100644 server/ente/passkey.go
 create mode 100644 server/ente/passkeyCredential.go
 create mode 100644 server/ente/public_collection.go
 create mode 100644 server/ente/push.go
 create mode 100644 server/ente/remotestore.go
 create mode 100644 server/ente/srp.go
 create mode 100644 server/ente/storagebonus/errors.go
 create mode 100644 server/ente/storagebonus/referral.go
 create mode 100644 server/ente/storagebonus/referral_type.go
 create mode 100644 server/ente/storagebonus/storge_bonus.go
 create mode 100644 server/ente/trash.go
 create mode 100644 server/ente/user.go
 create mode 100644 server/ente/userentity/entity.go
 create mode 100644 server/ente/webauthnSession.go
 create mode 100644 server/go.mod
 create mode 100644 server/go.sum
 create mode 100644 server/mail-templates/account_deleted.html
 create mode 100644 server/mail-templates/account_deleted_active_sub.html
 create mode 100644 server/mail-templates/email_changed.html
 create mode 100644 server/mail-templates/family_accepted.html
 create mode 100644 server/mail-templates/family_invited.html
 create mode 100644 server/mail-templates/family_left.html
 create mode 100644 server/mail-templates/family_removed.html
 create mode 100644 server/mail-templates/files_collected.html
 create mode 100644 server/mail-templates/mobile_app_first_upload.html
 create mode 100644 server/mail-templates/on_hold.html
 create mode 100644 server/mail-templates/ott_auth.html
 create mode 100644 server/mail-templates/ott_change_email.html
 create mode 100644 server/mail-templates/ott_photos.html
 create mode 100644 server/mail-templates/report_alert.html
 create mode 100644 server/mail-templates/report_limit_exceeded_alert.html
 create mode 100644 server/mail-templates/storage_limit_exceeded.html
 create mode 100644 server/mail-templates/subscription_ended.html
 create mode 100644 server/mail-templates/subscription_upgraded.html
 create mode 100644 server/mail-templates/successful_referral.html
 create mode 100644 server/mail-templates/web_app_first_upload.html
 create mode 100644 server/migrations/10_update_temp_object_keys.down.sql
 create mode 100644 server/migrations/10_update_temp_object_keys.up.sql
 create mode 100644 server/migrations/11_remove_kek_hash_constraint.down.sql
 create mode 100644 server/migrations/11_remove_kek_hash_constraint.up.sql
 create mode 100644 server/migrations/12_add_hash_limits.down.sql
 create mode 100644 server/migrations/12_add_hash_limits.up.sql
 create mode 100644 server/migrations/13_add_recovery_key.down.sql
 create mode 100644 server/migrations/13_add_recovery_key.up.sql
 create mode 100644 server/migrations/14_add_user_agent.down.sql
 create mode 100644 server/migrations/14_add_user_agent.up.sql
 create mode 100644 server/migrations/15_update_subscriptions.down.sql
 create mode 100644 server/migrations/15_update_subscriptions.up.sql
 create mode 100644 server/migrations/16_remove_deleted_file_objects.down.sql
 create mode 100644 server/migrations/16_remove_deleted_file_objects.up.sql
 create mode 100644 server/migrations/17_add_notification_history_table.down.sql
 create mode 100644 server/migrations/17_add_notification_history_table.up.sql
 create mode 100644 server/migrations/18_update_size_column.down.sql
 create mode 100644 server/migrations/18_update_size_column.up.sql
 create mode 100644 server/migrations/19_add_encrypted_email_columns.down.sql
 create mode 100644 server/migrations/19_add_encrypted_email_columns.up.sql
 create mode 100644 server/migrations/1_create_tables.down.sql
 create mode 100644 server/migrations/1_create_tables.up.sql
 create mode 100644 server/migrations/20_recompute_usage.down.sql
 create mode 100644 server/migrations/20_recompute_usage.up.sql
 create mode 100644 server/migrations/21_add_two_factor.down.sql
 create mode 100644 server/migrations/21_add_two_factor.up.sql
 create mode 100644 server/migrations/22_add_location_tag_table.down.sql
 create mode 100644 server/migrations/22_add_location_tag_table.up.sql
 create mode 100644 server/migrations/23_update_mem_limit_column.down.sql
 create mode 100644 server/migrations/23_update_mem_limit_column.up.sql
 create mode 100644 server/migrations/24_bump_ids.down.sql
 create mode 100644 server/migrations/24_bump_ids.up.sql
 create mode 100644 server/migrations/25_alter_ids_to_bigint.down.sql
 create mode 100644 server/migrations/25_alter_ids_to_bigint.up.sql
 create mode 100644 server/migrations/26_update_ott_and_twofactor.down.sql
 create mode 100644 server/migrations/26_update_ott_and_twofactor.up.sql
 create mode 100644 server/migrations/27_consolidate_files_table.down.sql
 create mode 100644 server/migrations/27_consolidate_files_table.up.sql
 create mode 100644 server/migrations/28_queue_time_stamp.down.sql
 create mode 100644 server/migrations/28_queue_time_stamp.up.sql
 create mode 100644 server/migrations/29_drop_unencrypted_email.down.sql
 create mode 100644 server/migrations/29_drop_unencrypted_email.up.sql
 create mode 100644 server/migrations/2_create_subscriptions.down.sql
 create mode 100644 server/migrations/2_create_subscriptions.up.sql
 create mode 100644 server/migrations/30_add_magic_metadata.up.sql
 create mode 100644 server/migrations/30_drop_magic_metadata.down.sql
 create mode 100644 server/migrations/31_add_pub_magic_metadata.up.sql
 create mode 100644 server/migrations/31_drop_pub_magic_metadata.down.sql
 create mode 100644 server/migrations/32_add_trash_table.down.sql
 create mode 100644 server/migrations/32_add_trash_table.up.sql
 create mode 100644 server/migrations/33_change_update_at_column_trigger.down.sql
 create mode 100644 server/migrations/33_change_update_at_column_trigger.up.sql
 create mode 100644 server/migrations/34_trash_delete_by_idx.down.sql
 create mode 100644 server/migrations/34_trash_delete_by_idx.up.sql
 create mode 100644 server/migrations/35_add_push_tokens_table.down.sql
 create mode 100644 server/migrations/35_add_push_tokens_table.up.sql
 create mode 100644 server/migrations/36_update_key_attr_and_tokens.down.sql
 create mode 100644 server/migrations/36_update_key_attr_and_tokens.up.sql
 create mode 100644 server/migrations/37_public_collection.down.sql
 create mode 100644 server/migrations/37_public_collection.up.sql
 create mode 100644 server/migrations/38_add_abuse_report_detail_json.down.sql
 create mode 100644 server/migrations/38_add_abuse_report_detail_json.up.sql
 create mode 100644 server/migrations/39_drop_file_and_thumbnail_object_keys.down.sql
 create mode 100644 server/migrations/39_drop_file_and_thumbnail_object_keys.up.sql
 create mode 100644 server/migrations/3_add_encypted_collection_name.down.sql
 create mode 100644 server/migrations/3_add_encypted_collection_name.up.sql
 create mode 100644 server/migrations/40_advance_url_settings.down.sql
 create mode 100644 server/migrations/40_advance_url_settings.up.sql
 create mode 100644 server/migrations/41_add_file_info.down.sql
 create mode 100644 server/migrations/41_add_file_info.up.sql
 create mode 100644 server/migrations/42_collection_add_magic_metadata.up.sql
 create mode 100644 server/migrations/42_collection_drop_magic_metadata.down.sql
 create mode 100644 server/migrations/43_family_plan.down.sql
 create mode 100644 server/migrations/43_family_plan.up.sql
 create mode 100644 server/migrations/44_trash_add_index.down.sql
 create mode 100644 server/migrations/44_trash_add_index.up.sql
 create mode 100644 server/migrations/45_collection_files_file_id_idx.down.sql
 create mode 100644 server/migrations/45_collection_files_file_id_idx.up.sql
 create mode 100644 server/migrations/46_replication_v2.down.sql
 create mode 100644 server/migrations/46_replication_v2.up.sql
 create mode 100644 server/migrations/47_wasabi_integration.down.sql
 create mode 100644 server/migrations/47_wasabi_integration.up.sql
 create mode 100644 server/migrations/48_add_stripe_account.down.sql
 create mode 100644 server/migrations/48_add_stripe_account.up.sql
 create mode 100644 server/migrations/49_add_unique_idx_txn_id.down.sql
 create mode 100644 server/migrations/49_add_unique_idx_txn_id.up.sql
 create mode 100644 server/migrations/4_update_subscriptions_schema.down.sql
 create mode 100644 server/migrations/4_update_subscriptions_schema.up.sql
 create mode 100644 server/migrations/50_remote_store.down.sql
 create mode 100644 server/migrations/50_remote_store.up.sql
 create mode 100644 server/migrations/51_th_time_human.down.sql
 create mode 100644 server/migrations/51_th_time_human.up.sql
 create mode 100644 server/migrations/52_authenticator.down.sql
 create mode 100644 server/migrations/52_authenticator.up.sql
 create mode 100644 server/migrations/53_add_apps.down.sql
 create mode 100644 server/migrations/53_add_apps.up.sql
 create mode 100644 server/migrations/54_update_public_tokens.down.sql
 create mode 100644 server/migrations/54_update_public_tokens.up.sql
 create mode 100644 server/migrations/55_extend_subs.down.sql
 create mode 100644 server/migrations/55_extend_subs.up.sql
 create mode 100644 server/migrations/56_add_uncategorized_type_constraint.down.sql
 create mode 100644 server/migrations/56_add_uncategorized_type_constraint.up.sql
 create mode 100644 server/migrations/57_object_copies.down.sql
 create mode 100644 server/migrations/57_object_copies.up.sql
 create mode 100644 server/migrations/58_update_ott_unique_constraint.down.sql
 create mode 100644 server/migrations/58_update_ott_unique_constraint.up.sql
 create mode 100644 server/migrations/59_delete_data.down.sql
 create mode 100644 server/migrations/59_delete_data.up.sql
 create mode 100644 server/migrations/5_rename_storage_in_mbs_to_storage.down.sql
 create mode 100644 server/migrations/5_rename_storage_in_mbs_to_storage.up.sql
 create mode 100644 server/migrations/60_add_columns_cf.down.sql
 create mode 100644 server/migrations/60_add_columns_cf.up.sql
 create mode 100644 server/migrations/61_storage_bonus.down.sql
 create mode 100644 server/migrations/61_storage_bonus.up.sql
 create mode 100644 server/migrations/62_entity_store.down.sql
 create mode 100644 server/migrations/62_entity_store.up.sql
 create mode 100644 server/migrations/63_add_kex_store.down.sql
 create mode 100644 server/migrations/63_add_kex_store.up.sql
 create mode 100644 server/migrations/64_collection_drop_pub_magic_metadata.down.sql
 create mode 100644 server/migrations/64_collection_pub_magic_metadata.up.sql
 create mode 100644 server/migrations/65_shared_collection_drop_magic_metadata.down.sql
 create mode 100644 server/migrations/65_shared_collection_magic_metadata.up.sql
 create mode 100644 server/migrations/66_add_srp_attributes.down.sql
 create mode 100644 server/migrations/66_add_srp_attributes.up.sql
 create mode 100644 server/migrations/67_add_email_mfa_column.up.sql
 create mode 100644 server/migrations/67_email_mfa_column.down.sql
 create mode 100644 server/migrations/68_add_locker_changes.down.sql
 create mode 100644 server/migrations/68_add_locker_changes.up.sql
 create mode 100644 server/migrations/69_add_srp_updated_at_trigger.down.sql
 create mode 100644 server/migrations/69_add_srp_updated_at_trigger.up.sql
 create mode 100644 server/migrations/6_create_usage.down.sql
 create mode 100644 server/migrations/6_create_usage.up.sql
 create mode 100644 server/migrations/70_add_embeddings.down.sql
 create mode 100644 server/migrations/70_add_embeddings.up.sql
 create mode 100644 server/migrations/71_user_source_attr.down.sql
 create mode 100644 server/migrations/71_user_source_attr.up.sql
 create mode 100644 server/migrations/72_add_on_storage.down.sql
 create mode 100644 server/migrations/72_add_on_storage.up.sql
 create mode 100644 server/migrations/73_remove_kex_user_id_fk.down.sql
 create mode 100644 server/migrations/73_remove_kex_user_id_fk.up.sql
 create mode 100644 server/migrations/74_add_onnx.down.sql
 create mode 100644 server/migrations/74_add_onnx.up.sql
 create mode 100644 server/migrations/75_add_passkeys.down.sql
 create mode 100644 server/migrations/75_add_passkeys.up.sql
 create mode 100644 server/migrations/76_add_passkey_login_sessions.down.sql
 create mode 100644 server/migrations/76_add_passkey_login_sessions.up.sql
 create mode 100644 server/migrations/77_embeddings_table_update.down.sql
 create mode 100644 server/migrations/77_embeddings_table_update.up.sql
 create mode 100644 server/migrations/78_cast.down.sql
 create mode 100644 server/migrations/78_cast.up.sql
 create mode 100644 server/migrations/79_queue_index.down.sql
 create mode 100644 server/migrations/79_queue_index.up.sql
 create mode 100644 server/migrations/7_add_unique_usage_user_id.down.sql
 create mode 100644 server/migrations/7_add_unique_usage_user_id.up.sql
 create mode 100644 server/migrations/8_update_otts_schema_link_with_email.down.sql
 create mode 100644 server/migrations/8_update_otts_schema_link_with_email.up.sql
 create mode 100644 server/migrations/9_create_datacenter.down.sql
 create mode 100644 server/migrations/9_create_datacenter.up.sql
 create mode 100644 server/pkg/api/admin.go
 create mode 100644 server/pkg/api/authenticator.go
 create mode 100644 server/pkg/api/billing.go
 create mode 100644 server/pkg/api/cast.go
 create mode 100644 server/pkg/api/collection.go
 create mode 100644 server/pkg/api/embedding.go
 create mode 100644 server/pkg/api/family.go
 create mode 100644 server/pkg/api/file.go
 create mode 100644 server/pkg/api/healthcheck.go
 create mode 100644 server/pkg/api/kex.go
 create mode 100644 server/pkg/api/locationtag.go
 create mode 100644 server/pkg/api/offer.go
 create mode 100644 server/pkg/api/passkeys.go
 create mode 100644 server/pkg/api/public_collection.go
 create mode 100644 server/pkg/api/push.go
 create mode 100644 server/pkg/api/remotestore.go
 create mode 100644 server/pkg/api/storage_bonus.go
 create mode 100644 server/pkg/api/trash.go
 create mode 100644 server/pkg/api/user.go
 create mode 100644 server/pkg/api/userentity.go
 create mode 100644 server/pkg/controller/access/access.go
 create mode 100644 server/pkg/controller/access/collection.go
 create mode 100644 server/pkg/controller/access/file.go
 create mode 100644 server/pkg/controller/appstore.go
 create mode 100644 server/pkg/controller/authenticator/controller.go
 create mode 100644 server/pkg/controller/billing.go
 create mode 100644 server/pkg/controller/cast/controller.go
 create mode 100644 server/pkg/controller/collection.go
 create mode 100644 server/pkg/controller/collection_cast.go
 create mode 100644 server/pkg/controller/commonbilling/controller.go
 create mode 100644 server/pkg/controller/data_cleanup/controller.go
 create mode 100644 server/pkg/controller/discord/discord.go
 create mode 100644 server/pkg/controller/email/email_notification.go
 create mode 100644 server/pkg/controller/embedding/controller.go
 create mode 100644 server/pkg/controller/family/admin.go
 create mode 100644 server/pkg/controller/family/family.go
 create mode 100644 server/pkg/controller/family/member.go
 create mode 100644 server/pkg/controller/file.go
 create mode 100644 server/pkg/controller/kex/kex.go
 create mode 100644 server/pkg/controller/locationtag/controller.go
 create mode 100644 server/pkg/controller/lock/lock.go
 create mode 100644 server/pkg/controller/mailing_lists.go
 create mode 100644 server/pkg/controller/object.go
 create mode 100644 server/pkg/controller/object_cleanup.go
 create mode 100644 server/pkg/controller/offer/offer.go
 create mode 100644 server/pkg/controller/passkeys.go
 create mode 100644 server/pkg/controller/playstore.go
 create mode 100644 server/pkg/controller/public_collection.go
 create mode 100644 server/pkg/controller/push.go
 create mode 100644 server/pkg/controller/remotestore/controller.go
 create mode 100644 server/pkg/controller/replication3.go
 create mode 100644 server/pkg/controller/storagebonus/bonus.go
 create mode 100644 server/pkg/controller/storagebonus/referral.go
 create mode 100644 server/pkg/controller/storagebonus/referral_cron.go
 create mode 100644 server/pkg/controller/stripe.go
 create mode 100644 server/pkg/controller/trash.go
 create mode 100644 server/pkg/controller/trash_file_metadata.go
 create mode 100644 server/pkg/controller/usage.go
 create mode 100644 server/pkg/controller/user/jwt.go
 create mode 100644 server/pkg/controller/user/srp.go
 create mode 100644 server/pkg/controller/user/twofactor.go
 create mode 100644 server/pkg/controller/user/user.go
 create mode 100644 server/pkg/controller/user/user_delete.go
 create mode 100644 server/pkg/controller/user/user_details.go
 create mode 100644 server/pkg/controller/user/userauth.go
 create mode 100644 server/pkg/controller/usercache/controller.go
 create mode 100644 server/pkg/controller/usercache/count.go
 create mode 100644 server/pkg/controller/userentity/controller.go
 create mode 100644 server/pkg/external/wasabi/compliance.go
 create mode 100644 server/pkg/external/zoho/api.go
 create mode 100644 server/pkg/middleware/access_token.go
 create mode 100644 server/pkg/middleware/auth.go
 create mode 100644 server/pkg/middleware/cast_auth.go
 create mode 100644 server/pkg/middleware/rate_limit.go
 create mode 100644 server/pkg/middleware/recover.go
 create mode 100644 server/pkg/middleware/request_logger.go
 create mode 100644 server/pkg/repo/authenticator/entity.go
 create mode 100644 server/pkg/repo/authenticator/key.go
 create mode 100644 server/pkg/repo/authenticator/repository.go
 create mode 100644 server/pkg/repo/billing.go
 create mode 100644 server/pkg/repo/cast/repo.go
 create mode 100644 server/pkg/repo/collection.go
 create mode 100644 server/pkg/repo/datacleanup/repository.go
 create mode 100644 server/pkg/repo/embedding/repository.go
 create mode 100644 server/pkg/repo/family.go
 create mode 100644 server/pkg/repo/file.go
 create mode 100644 server/pkg/repo/file_size.go
 create mode 100644 server/pkg/repo/kex/repository.go
 create mode 100644 server/pkg/repo/locationtag/repository.go
 create mode 100644 server/pkg/repo/notificationhistory.go
 create mode 100644 server/pkg/repo/object.go
 create mode 100644 server/pkg/repo/object_cleanup.go
 create mode 100644 server/pkg/repo/object_copies.go
 create mode 100644 server/pkg/repo/passkey/credential.go
 create mode 100644 server/pkg/repo/passkey/passkey.go
 create mode 100644 server/pkg/repo/passkey/session.go
 create mode 100644 server/pkg/repo/public_collection.go
 create mode 100644 server/pkg/repo/push.go
 create mode 100644 server/pkg/repo/queue.go
 create mode 100644 server/pkg/repo/remotestore/repository.go
 create mode 100644 server/pkg/repo/srp.go
 create mode 100644 server/pkg/repo/storagebonus/bf_addon.go
 create mode 100644 server/pkg/repo/storagebonus/bonus.go
 create mode 100644 server/pkg/repo/storagebonus/bonus_test.go
 create mode 100644 server/pkg/repo/storagebonus/referral_codes.go
 create mode 100644 server/pkg/repo/storagebonus/referral_codes_test.go
 create mode 100644 server/pkg/repo/storagebonus/referral_tracking.go
 create mode 100644 server/pkg/repo/storagebonus/repo.go
 create mode 100644 server/pkg/repo/storagebonus/repo_test.go
 create mode 100644 server/pkg/repo/tasklock.go
 create mode 100644 server/pkg/repo/trash.go
 create mode 100644 server/pkg/repo/twofactor.go
 create mode 100644 server/pkg/repo/usage.go
 create mode 100644 server/pkg/repo/user.go
 create mode 100644 server/pkg/repo/userauth.go
 create mode 100644 server/pkg/repo/userentity/data.go
 create mode 100644 server/pkg/repo/userentity/key.go
 create mode 100644 server/pkg/repo/userentity/repository.go
 create mode 100644 server/pkg/utils/array/array.go
 create mode 100644 server/pkg/utils/auth/auth.go
 create mode 100644 server/pkg/utils/billing/billing.go
 create mode 100644 server/pkg/utils/byteMarshaller/b64.go
 create mode 100644 server/pkg/utils/byteMarshaller/byte.go
 create mode 100644 server/pkg/utils/config/config.go
 create mode 100644 server/pkg/utils/crypto/crypto.go
 create mode 100644 server/pkg/utils/email/email.go
 create mode 100644 server/pkg/utils/file/file.go
 create mode 100644 server/pkg/utils/handler/handler.go
 create mode 100644 server/pkg/utils/network/network.go
 create mode 100644 server/pkg/utils/random/generate.go
 create mode 100644 server/pkg/utils/recover/wrap.go
 create mode 100644 server/pkg/utils/recover/wrap_test.go
 create mode 100644 server/pkg/utils/s3config/s3config.go
 create mode 100644 server/pkg/utils/string/string.go
 create mode 100644 server/pkg/utils/time/time.go
 create mode 100644 server/scripts/compose/credentials.yaml
 create mode 100755 server/scripts/compose/minio-provision.sh
 create mode 100644 server/scripts/images/museum.png
 create mode 100755 server/scripts/lint.sh
 create mode 100644 server/scripts/museum.service
 create mode 100644 server/tools/abort-unfinished-multipart-uploads/README.md
 create mode 100644 server/tools/abort-unfinished-multipart-uploads/main.go
 create mode 100644 server/tools/gen-random-keys/README.md
 create mode 100644 server/tools/gen-random-keys/main.go
 create mode 100644 server/tools/pkg/cli/aws.go
 create mode 100644 server/tools/test-wasabi-compliance/README.md
 create mode 100644 server/tools/test-wasabi-compliance/main.go

diff --git a/server/.air.toml b/server/.air.toml
new file mode 100644
index 000000000..068d38cdd
--- /dev/null
+++ b/server/.air.toml
@@ -0,0 +1,32 @@
+root = "."
+tmp_dir = "tmp"
+
+[build]
+  bin = "./tmp/main"
+  cmd = "go build -o ./tmp ./cmd/museum/main.go"
+  delay = 1000
+  exclude_dir = ["assets", "tmp", "vendor"]
+  exclude_file = []
+  exclude_regex = []
+  exclude_unchanged = false
+  follow_symlink = false
+  full_bin = "./tmp/main"
+  include_dir = []
+  include_ext = ["go", "tpl", "tmpl", "html"]
+  kill_delay = "0s"
+  log = "build-errors.log"
+  send_interrupt = false
+  stop_on_error = true
+
+[color]
+  app = ""
+  build = "yellow"
+  main = "magenta"
+  runner = "green"
+  watcher = "cyan"
+
+[log]
+  time = false
+
+[misc]
+  clean_on_exit = false
diff --git a/server/.dockerignore b/server/.dockerignore
new file mode 100644
index 000000000..6b8710a71
--- /dev/null
+++ b/server/.dockerignore
@@ -0,0 +1 @@
+.git
diff --git a/server/.github/ISSUE_TEMPLATE/feature_request.md b/server/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..30d8506c7
--- /dev/null
+++ b/server/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,14 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+### Problem Statement
+
+### Proposed Solution
+
+### Caveats
diff --git a/server/.github/pull_request_template.md b/server/.github/pull_request_template.md
new file mode 100644
index 000000000..dba8023c4
--- /dev/null
+++ b/server/.github/pull_request_template.md
@@ -0,0 +1,3 @@
+## Description
+
+## Test Plan
diff --git a/server/.github/workflows/dev-ci.yml b/server/.github/workflows/dev-ci.yml
new file mode 100644
index 000000000..8900f5369
--- /dev/null
+++ b/server/.github/workflows/dev-ci.yml
@@ -0,0 +1,28 @@
+name: Dev CI
+
+on:
+  workflow_dispatch:
+  # Enable manual run
+  push:
+    # Sequence of patterns matched against refs/tags
+    tags:
+      - "v*" # Push events to matching v*, i.e. v4.2.0
+
+jobs:
+  build:
+    # This job will run on ubuntu virtual machine
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        name: Check out code
+
+      - uses: mr-smithers-excellent/docker-build-push@v6
+        name: Build & Push
+        with:
+          image: ente/museum-dev
+          registry: rg.fr-par.scw.cloud
+          enableBuildKit: true
+          buildArgs: GIT_COMMIT=${GITHUB_SHA}
+          tags: ${GITHUB_SHA}, latest
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
diff --git a/server/.github/workflows/pr.yml b/server/.github/workflows/pr.yml
new file mode 100644
index 000000000..59ddb5fd6
--- /dev/null
+++ b/server/.github/workflows/pr.yml
@@ -0,0 +1,21 @@
+name: Code quality
+
+on:
+  # Enable manual run
+  workflow_dispatch:
+  # Run on every push; this also covers pull requests
+  push:
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: "go.mod"
+          cache: true
+      - run: sudo apt-get update && sudo apt-get install libsodium-dev
+      - run:
+          "./scripts/lint.sh"
+      # - run: "go test ./..."
diff --git a/server/.github/workflows/prod-ci.yml b/server/.github/workflows/prod-ci.yml
new file mode 100644
index 000000000..791e5218c
--- /dev/null
+++ b/server/.github/workflows/prod-ci.yml
@@ -0,0 +1,28 @@
+name: Prod CI
+
+on:
+  workflow_dispatch:
+  # Enable manual run
+  push:
+    # Sequence of patterns matched against refs/tags
+    tags:
+      - "v*" # Push events to matching v*, i.e. v4.2.0
+
+jobs:
+  build:
+    # This job will run on ubuntu virtual machine
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+        name: Check out code
+
+      - uses: mr-smithers-excellent/docker-build-push@v6
+        name: Build & Push
+        with:
+          image: ente/museum-prod
+          registry: rg.fr-par.scw.cloud
+          enableBuildKit: true
+          buildArgs: GIT_COMMIT=${GITHUB_SHA}
+          tags: ${GITHUB_SHA}, latest
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
diff --git a/server/.gitignore b/server/.gitignore
new file mode 100644
index 000000000..3c7fd7901
--- /dev/null
+++ b/server/.gitignore
@@ -0,0 +1,10 @@
+data/**
+.DS_Store
+Photos.code-workspace
+logs/**
+.idea/**
+.vscode/**
+tmp/**
+museum.yaml
+bin/**
+data/
diff --git a/server/Dockerfile b/server/Dockerfile
new file mode 100644
index 000000000..25d5bb0ff
--- /dev/null
+++ b/server/Dockerfile
@@ -0,0 +1,26 @@
+FROM golang:1.20-alpine3.17 as builder
+RUN apk add --no-cache gcc musl-dev git build-base pkgconfig libsodium-dev
+
+ENV GOOS=linux
+
+WORKDIR /etc/ente/
+
+COPY go.mod .
+COPY go.sum .
+RUN go mod download
+
+COPY . .
+RUN --mount=type=cache,target=/root/.cache/go-build \
+    go build -o museum cmd/museum/main.go
+
+FROM alpine:3.17
+RUN apk add libsodium-dev
+COPY --from=builder /etc/ente/museum .
+COPY configurations configurations
+COPY migrations migrations
+COPY mail-templates mail-templates
+
+ARG GIT_COMMIT
+ENV GIT_COMMIT=$GIT_COMMIT
+
+CMD ["./museum"]
diff --git a/server/LICENSE b/server/LICENSE
new file mode 100644
index 000000000..bae94e189
--- /dev/null
+++ b/server/LICENSE
@@ -0,0 +1,661 @@
+                    GNU AFFERO GENERAL PUBLIC LICENSE
+                       Version 3, 19 November 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The GNU Affero General Public License is a free, copyleft license for
+software and other kinds of works, specifically designed to ensure
+cooperation with the community in the case of network server software.
+
+  The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works.  By contrast,
+our General Public Licenses are intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+  Developers that use our General Public Licenses protect your rights
+with two steps: (1) assert copyright on the software, and (2) offer
+you this License which gives you legal permission to copy, distribute
+and/or modify the software.
+
+  A secondary benefit of defending all users' freedom is that
+improvements made in alternate versions of the program, if they
+receive widespread use, become available for other developers to
+incorporate.  Many developers of free software are heartened and
+encouraged by the resulting cooperation.  However, in the case of
+software used on network servers, this result may fail to come about.
+The GNU General Public License permits making a modified version and
+letting the public access it on a server without ever releasing its
+source code to the public.
+
+  The GNU Affero General Public License is designed specifically to
+ensure that, in such cases, the modified source code becomes available
+to the community.  It requires the operator of a network server to
+provide the source code of the modified version running there to the
+users of that server.  Therefore, public use of a modified version, on
+a publicly accessible server, gives the public access to the source
+code of the modified version.
+
+  An older license, called the Affero General Public License and
+published by Affero, was designed to accomplish similar goals.  This is
+a different license, not a version of the Affero GPL, but Affero has
+released a new version of the Affero GPL which permits relicensing under
+this license.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                       TERMS AND CONDITIONS
+
+  0. Definitions.
+
+  "This License" refers to version 3 of the GNU Affero General Public License.
+
+  "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+  "The Program" refers to any copyrightable work licensed under this
+License.  Each licensee is addressed as "you".  "Licensees" and
+"recipients" may be individuals or organizations.
+
+  To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy.  The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+  A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+  To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy.  Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+  To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies.  Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+  An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License.  If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+  1. Source Code.
+
+  The "source code" for a work means the preferred form of the work
+for making modifications to it.  "Object code" means any non-source
+form of a work.
+
+  A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+  The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form.  A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+  The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities.  However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work.  For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+  The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+  The Corresponding Source for a work in source code form is that
+same work.
+
+  2. Basic Permissions.
+
+  All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met.  This License explicitly affirms your unlimited
+permission to run the unmodified Program.  The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work.  This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+  You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force.  You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright.  Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+  Conveying under any other circumstances is permitted solely under
+the conditions stated below.  Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+  3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+  No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+  When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+  4. Conveying Verbatim Copies.
+
+  You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+  You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+  5. Conveying Modified Source Versions.
+
+  You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+    a) The work must carry prominent notices stating that you modified
+    it, and giving a relevant date.
+
+    b) The work must carry prominent notices stating that it is
+    released under this License and any conditions added under section
+    7.  This requirement modifies the requirement in section 4 to
+    "keep intact all notices".
+
+    c) You must license the entire work, as a whole, under this
+    License to anyone who comes into possession of a copy.  This
+    License will therefore apply, along with any applicable section 7
+    additional terms, to the whole of the work, and all its parts,
+    regardless of how they are packaged.  This License gives no
+    permission to license the work in any other way, but it does not
+    invalidate such permission if you have separately received it.
+
+    d) If the work has interactive user interfaces, each must display
+    Appropriate Legal Notices; however, if the Program has interactive
+    interfaces that do not display Appropriate Legal Notices, your
+    work need not make them do so.
+
+  A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit.  Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+  6. Conveying Non-Source Forms.
+
+  You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+    a) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by the
+    Corresponding Source fixed on a durable physical medium
+    customarily used for software interchange.
+
+    b) Convey the object code in, or embodied in, a physical product
+    (including a physical distribution medium), accompanied by a
+    written offer, valid for at least three years and valid for as
+    long as you offer spare parts or customer support for that product
+    model, to give anyone who possesses the object code either (1) a
+    copy of the Corresponding Source for all the software in the
+    product that is covered by this License, on a durable physical
+    medium customarily used for software interchange, for a price no
+    more than your reasonable cost of physically performing this
+    conveying of source, or (2) access to copy the
+    Corresponding Source from a network server at no charge.
+
+    c) Convey individual copies of the object code with a copy of the
+    written offer to provide the Corresponding Source.  This
+    alternative is allowed only occasionally and noncommercially, and
+    only if you received the object code with such an offer, in accord
+    with subsection 6b.
+
+    d) Convey the object code by offering access from a designated
+    place (gratis or for a charge), and offer equivalent access to the
+    Corresponding Source in the same way through the same place at no
+    further charge.  You need not require recipients to copy the
+    Corresponding Source along with the object code.  If the place to
+    copy the object code is a network server, the Corresponding Source
+    may be on a different server (operated by you or a third party)
+    that supports equivalent copying facilities, provided you maintain
+    clear directions next to the object code saying where to find the
+    Corresponding Source.  Regardless of what server hosts the
+    Corresponding Source, you remain obligated to ensure that it is
+    available for as long as needed to satisfy these requirements.
+
+    e) Convey the object code using peer-to-peer transmission, provided
+    you inform other peers where the object code and Corresponding
+    Source of the work are being offered to the general public at no
+    charge under subsection 6d.
+
+  A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+  A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling.  In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage.  For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product.  A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+  "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source.  The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+  If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information.  But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+  The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed.  Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+  Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+  7. Additional Terms.
+
+  "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law.  If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+  When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it.  (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.)  You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+  Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+    a) Disclaiming warranty or limiting liability differently from the
+    terms of sections 15 and 16 of this License; or
+
+    b) Requiring preservation of specified reasonable legal notices or
+    author attributions in that material or in the Appropriate Legal
+    Notices displayed by works containing it; or
+
+    c) Prohibiting misrepresentation of the origin of that material, or
+    requiring that modified versions of such material be marked in
+    reasonable ways as different from the original version; or
+
+    d) Limiting the use for publicity purposes of names of licensors or
+    authors of the material; or
+
+    e) Declining to grant rights under trademark law for use of some
+    trade names, trademarks, or service marks; or
+
+    f) Requiring indemnification of licensors and authors of that
+    material by anyone who conveys the material (or modified versions of
+    it) with contractual assumptions of liability to the recipient, for
+    any liability that these contractual assumptions directly impose on
+    those licensors and authors.
+
+  All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10.  If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term.  If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+  If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+  Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+  8. Termination.
+
+  You may not propagate or modify a covered work except as expressly
+provided under this License.  Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+  However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. 
+ + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. 
If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. + + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 
+
+  16. Limitation of Liability.
+
+  IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+  17. Interpretation of Sections 15 and 16.
+
+  If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU Affero General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU Affero General Public License for more details.
+
+    You should have received a copy of the GNU Affero General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+  If your software can interact with users remotely through a computer
+network, you should also make sure that it provides a way for users to
+get its source.  For example, if your program is a web application, its
+interface could display a "Source" link that leads users to an archive
+of the code.  There are many ways you could offer source, and different
+solutions will be better for different programs; see section 13 for the
+specific requirements.
+
+  You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU AGPL, see
+<https://www.gnu.org/licenses/>.
\ No newline at end of file
diff --git a/server/README.md b/server/README.md
new file mode 100644
index 000000000..2a7becc32
--- /dev/null
+++ b/server/README.md
@@ -0,0 +1,121 @@
+# Museum
+
+API server for [ente.io](https://ente.io)
+
+![Museum's role in Ente's architecture](scripts/images/museum.png)
+
+We named our server _museum_ because for us and our customers, personal photos
+are worth more than any other piece of art.
+
+Both Ente Photos and Ente Auth use the same server (intentionally). This allows
+users to use the same credentials to store different types of end-to-end
+encrypted data without needing to create new accounts. We plan on building more
+apps using the same server – this is easy, because the server is already data
+agnostic (since the data is end-to-end encrypted).
+
+## Getting started
+
+Start a local cluster
+
+    docker compose up --build
+
+And that's it!
+
+You can now make API requests to localhost, for example
+
+    curl http://localhost:8080/ping
+
+Let's try changing the message to get the hang of things. Open `healthcheck.go`,
+change `"pong"` to `"kong"`, stop the currently running cluster (`Ctrl-c`), and
+then rerun it
+
+    docker compose up --build
+
+And ping again
+
+    curl http://localhost:8080/ping
+
+This time you'll see the updated message.
+
+For more details about how to get museum up and running, see
+[RUNNING.md](/RUNNING.md).
+
+## Architecture
+
+With the mechanics of running museum out of the way, let us revisit the diagram
+we saw earlier.
+
+It is a long-term goal of ours to make museum redundant. The beauty of an
+end-to-end encrypted architecture is that the service provider has no special
+conceptual role. The user has full ownership of the data at all points, and
+using suitably advanced clients the cloud storage and replication can be
+abstracted away, or be handled in a completely decentralized manner.
+
+Until we get there, museum serves as an assistant for various housekeeping
+chores.
+
+* Clients ([mobile](../mobile), [web](../web) and [desktop](../desktop)) connect
+  to museum on the user's behalf. Museum then proxies data access (after adding
+  yet another layer of authentication on top of the user's master password),
+  performs billing-related functions, and triggers replication of encrypted user
+  data.
+
+* The end-to-end encrypted cryptography that powers all this is [documented
+  here](https://ente.io/architecture).
+
+* Details about the 3 (yes, 3!) clouds where the encrypted data and database are
+  replicated to are [documented here](https://ente.io/reliability).
+
+Museum's architecture is generic enough to support arbitrary end-to-end
+encrypted storage. While we're currently focusing on building a great photo
+storage and sharing experience, that's not a limit. For example, we already use
+museum to also provide an [end-to-end encrypted open source 2FA app with cloud
+backups](../auth).
+
+## Self hosting
+
+Museum is a single self-contained Docker image that is easy to self-host.
+
+When we write code for museum, the guiding light is simplicity and robustness.
+But this also extends to how we approach hosting. Museum is a single statically
+compiled binary that can be put anywhere and directly run.
+
+And it is built with containerization in mind - both during development and
+deployment. Just use the provided Dockerfile, configure to taste, and you're off
+to the races.
+
+> [!CAUTION]
+>
+> We don't publish any official docker images (yet). For self-hosters, the
+> recommendation is to build your own image using the provided `Dockerfile`.
+
+Everything you might need to run museum is in here, since this is the
+setup we ourselves use in production.
+
+> [!TIP]
+>
+> On our production servers, we wrap museum in a [systemd
+> service](scripts/museum.service). Our production machines are vanilla Ubuntu
+> images, with Docker and Promtail installed.
We then plonk in this systemd
> service, and use `systemctl start|stop|status museum` to herd it around.
+
+Some people new to Docker/Go/Postgres might have general questions though.
+Unfortunately, because of limited engineering bandwidth **we will currently not
+be able to prioritize support queries related to self hosting**, and we request
+that you not open issues around self hosting for the time being (feel free to
+create discussions though). The best way to summarize the status of self
+hosting is – **everything you need is here, but it is perhaps not readily
+documented, or flexible enough.**
+
+That said, we hope community members help each other out, e.g. in this
+repository's [Discussions](https://github.com/ente-io/ente/discussions), or on
+[our Discord](https://discord.gg/z2YVKkycX3). And whenever time permits, we
+will try to clarify and document such FAQs. Please feel free to open
+documentation PRs around this too.
+
+## Thanks ❤️
+
+We've had great fun with this combination (Golang + Postgres + Docker), and we
+hope you have fun tinkering with it too. A big thank you to all the people
+who've put in decades of work behind these great technologies. Truly, on the
+shoulders of giants we stand.
diff --git a/server/RUNNING.md b/server/RUNNING.md
new file mode 100644
index 000000000..22045fe2b
--- /dev/null
+++ b/server/RUNNING.md
@@ -0,0 +1,185 @@
+# Running Museum
+
+You can run a Docker Compose cluster containing museum and the essential
+auxiliary services it requires (database and object storage). This is the
+easiest and simplest way to get started, and also provides an isolated
+environment that doesn't clutter your machine.
+
+You can also run museum directly on your machine if you wish - it is a single
+static Go binary.
+
+This document describes both these approaches, and also outlines configuration.
+
+- [Running using Docker](#docker)
+- [Running without Docker](#without-docker)
+- [Configuration](#configuration)
+
+## Docker
+
+Start the cluster
+
+    docker compose up --build
+
+Once the cluster has started, you should be able to call museum
+
+    curl http://localhost:8080/ping
+
+Or connect from the [web app](../web)
+
+    NEXT_PUBLIC_ENTE_ENDPOINT=http://localhost:8080 yarn dev
+
+Or connect from the [mobile app](../mobile)
+
+    flutter run --dart-define=endpoint=http://localhost:8080
+
+Or interact with the other services in the cluster, e.g. connect to the DB
+
+    docker compose exec postgres env PGPASSWORD=pgpass psql -U pguser -d ente_db
+
+Or interact with the MinIO S3 API
+
+    AWS_ACCESS_KEY_ID=test AWS_SECRET_ACCESS_KEY=testtest \
+        aws s3 --endpoint-url http://localhost:3200 ls s3://test
+
+Or open the MinIO dashboard (user: `test`, password: `testtest`).
+
+> [!NOTE]
+>
+> If something seems amiss, ensure that Docker has read access to the parent
+> folder so that it can access `credentials.yaml` and other local files. On
+> macOS, you can do this by going to System Settings > Security & Privacy >
+> Files and Folders > Docker.
+
+### Cleanup
+
+Data is stored in Docker volumes and persists across container restarts. The
+volumes can be saved / inspected using the `docker volume` command.
+
+To remove stopped containers, use `docker compose rm`. To also remove volumes,
+use `docker compose down -v`.
+
+### Multiple clusters
+
+You can spin up independent clusters, each with its own volumes, by using the
+`-p` Docker Compose flag to specify different project names for each one.
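+
+For example, to bring up a second, independent cluster you could pass a
+different project name (here `museum2` is just an illustrative placeholder):
+
+    docker compose -p museum2 up --build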
+
+### Pruning images
+
+Each time museum gets rebuilt from source, a new image gets created but the old
+one is retained as a dangling image. You can use `docker image prune --force`,
+or `docker system prune` if that's fine with you, to remove these.
+
+## Without Docker
+
+The museum binary can be run by using `go run cmd/museum/main.go`. But first,
+you'll need to prepare your machine for development. Here we give the steps,
+with examples that work for macOS (please adapt to your OS).
+
+### Install [Go](https://golang.org/dl/)
+
+```sh
+brew tap homebrew/core
+brew upgrade
+brew install go
+```
+
+### Install other packages
+
+```sh
+brew install postgresql@12
+brew install libsodium
+brew install pkg-config
+```
+
+> [!NOTE]
+>
+> Here we install the same major version of Postgres as our production database
+> to avoid surprises, but if you're using a newer Postgres that should work
+> fine too.
+
+On M1 Macs, we additionally need to link the postgres keg.
+
+```sh
+brew link postgresql@12
+```
+
+### Init Postgres database
+
+Homebrew already creates a default database cluster for us, but if needed, it
+can also be done with the following commands:
+
+```sh
+sudo mkdir -p /usr/local/var/postgres
+sudo chmod 775 /usr/local/var/postgres
+sudo chown $(whoami) /usr/local/var/postgres
+initdb /usr/local/var/postgres
+```
+
+On M1 Macs, the path to the database cluster is
+`/opt/homebrew/var/postgresql@12` (instead of `/usr/local/var/postgres`).
+
+### Start Postgres
+
+```sh
+pg_ctl -D /usr/local/var/postgres -l logfile start
+```
+
+### Create user
+
+```sh
+createuser -s postgres
+```
+
+### Start museum
+
+```sh
+export ENTE_DB_USER=postgres
+go run cmd/museum/main.go
+```
+
+For live reloads, install [air](https://github.com/cosmtrek/air#installation).
+Then you can just call `air` after declaring the required environment variables.
+For example,
+
+```sh
+export ENTE_DB_USER=ente_user
+air
+```
+
+### Testing
+
+Set up a local database for testing. This is not required for running the
+server. Create a test database with the following name and credentials (e.g.
+via `psql -U postgres`):
+
+```sql
+CREATE DATABASE ente_test_db;
+CREATE USER test_user WITH PASSWORD 'test_pass';
+GRANT ALL PRIVILEGES ON DATABASE ente_test_db TO test_user;
+```
+
+For running the tests, you can use either of the following commands (the second
+one also clears the test cache first):
+
+```sh
+ENV="test" go test -v ./pkg/...
+go clean -testcache && ENV="test" go test -v ./pkg/...
+```
+
+## Configuration
+
+Now that you have museum running (either inside Docker or standalone), we can
+talk about configuring it.
+
+By default, museum runs in the "local" configuration using values specified in
+`local.yaml`.
+
+To override these values, you can create a file named `museum.yaml` in the
+current directory. This path is git-ignored for convenience. Note that if you
+run the Docker Compose cluster without creating this file, Docker will create
+an empty directory named `museum.yaml`, which you can `rmdir` if you need to
+provide a config file later on.
+
+The keys and values supported by this configuration file are documented in
+[configurations/local.yaml](configurations/local.yaml).
diff --git a/server/SECURITY.md b/server/SECURITY.md
new file mode 100644
index 000000000..1642c8307
--- /dev/null
+++ b/server/SECURITY.md
@@ -0,0 +1,44 @@
+ente believes that working with security researchers across the globe is crucial to keeping our
+users safe. If you believe you've found a security issue in our product or service, we encourage you to
+notify us (security@ente.io).
We welcome working with you to resolve the issue promptly. Thanks in advance!
+
+# Disclosure Policy
+
+- Let us know as soon as possible upon discovery of a potential security issue, and we'll make every
+  effort to quickly resolve the issue.
+- Provide us a reasonable amount of time to resolve the issue before any disclosure to the public or a
+  third party. We may publicly disclose the issue before resolving it, if appropriate.
+- Make a good faith effort to avoid privacy violations, destruction of data, and interruption or
+  degradation of our service. Only interact with accounts you own or with explicit permission of the
+  account holder.
+- If you would like to encrypt your report, please use the PGP key with long ID
+  `E273695C0403F34F74171932DF6DDDE98EBD2394` (available in the public keyserver pool).
+
+# In-scope
+
+- Security issues in any current release of ente. This includes the web app, desktop app,
+  and mobile apps (iOS and Android). Product downloads are available at https://ente.io. Source
+  code is available at https://github.com/ente-io.
+
+# Exclusions
+
+The following bug classes are out of scope:
+
+- Bugs that are already reported on any of ente's issue trackers (https://github.com/ente-io),
+  or that we already know of. Note that some of our issue tracking is private.
+- Issues in an upstream software dependency (e.g. Flutter, Next.js) that have already been reported
+  to the upstream maintainer.
+- Attacks requiring physical access to a user's device.
+- Self-XSS.
+- Issues related to software or protocols not under ente's control.
+- Vulnerabilities in outdated versions of ente.
+- Missing security best practices that do not directly lead to a vulnerability.
+- Issues that do not have any impact on the general public.
+
+While researching, we'd like to ask you to refrain from:
+
+- Denial of service
+- Spamming
+- Social engineering (including phishing) of ente staff or contractors
+- Any physical attempts against ente property or data centers
+
+Thank you for helping keep ente and our users safe!
diff --git a/server/cmd/museum/main.go b/server/cmd/museum/main.go new file mode 100644 index 000000000..495430a50 --- /dev/null +++ b/server/cmd/museum/main.go @@ -0,0 +1,962 @@ +package main + +import ( + "context" + "database/sql" + b64 "encoding/base64" + "fmt" + "net/http" + "os" + "os/signal" + "path" + "runtime" + "strings" + "syscall" + "time" + + "github.com/ente-io/museum/pkg/controller/cast" + + "github.com/ente-io/museum/pkg/controller/commonbilling" + + cache2 "github.com/ente-io/museum/ente/cache" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/controller/offer" + "github.com/ente-io/museum/pkg/controller/usercache" + + "github.com/GoKillers/libsodium-go/sodium" + "github.com/dlmiddlecote/sqlstats" + "github.com/ente-io/museum/ente/jwt" + "github.com/ente-io/museum/pkg/api" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/controller/access" + authenticatorCtrl "github.com/ente-io/museum/pkg/controller/authenticator" + dataCleanupCtrl "github.com/ente-io/museum/pkg/controller/data_cleanup" + "github.com/ente-io/museum/pkg/controller/email" + embeddingCtrl "github.com/ente-io/museum/pkg/controller/embedding" + "github.com/ente-io/museum/pkg/controller/family" + kexCtrl "github.com/ente-io/museum/pkg/controller/kex" + "github.com/ente-io/museum/pkg/controller/locationtag" + "github.com/ente-io/museum/pkg/controller/lock" + remoteStoreCtrl "github.com/ente-io/museum/pkg/controller/remotestore" + "github.com/ente-io/museum/pkg/controller/storagebonus" + "github.com/ente-io/museum/pkg/controller/user" + userEntityCtrl "github.com/ente-io/museum/pkg/controller/userentity" + "github.com/ente-io/museum/pkg/middleware" + "github.com/ente-io/museum/pkg/repo" + authenticatorRepo "github.com/ente-io/museum/pkg/repo/authenticator" + castRepo "github.com/ente-io/museum/pkg/repo/cast" + "github.com/ente-io/museum/pkg/repo/datacleanup" + "github.com/ente-io/museum/pkg/repo/embedding" + "github.com/ente-io/museum/pkg/repo/kex" + locationtagRepo "github.com/ente-io/museum/pkg/repo/locationtag" + "github.com/ente-io/museum/pkg/repo/passkey" + "github.com/ente-io/museum/pkg/repo/remotestore" + storageBonusRepo "github.com/ente-io/museum/pkg/repo/storagebonus" + userEntityRepo "github.com/ente-io/museum/pkg/repo/userentity" + "github.com/ente-io/museum/pkg/utils/billing" + "github.com/ente-io/museum/pkg/utils/config" + "github.com/ente-io/museum/pkg/utils/s3config" + timeUtil "github.com/ente-io/museum/pkg/utils/time" + "github.com/gin-contrib/gzip" + "github.com/gin-contrib/requestid" + "github.com/gin-contrib/timeout" + "github.com/gin-gonic/gin" + "github.com/golang-migrate/migrate/v4" + "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + _ "github.com/lib/pq" + "github.com/patrickmn/go-cache" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/robfig/cron/v3" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + ginprometheus "github.com/zsais/go-gin-prometheus" + "gopkg.in/natefinch/lumberjack.v2" +) + +func main() { + environment := os.Getenv("ENVIRONMENT") + if environment == "" { + environment = "local" + } + + err := config.ConfigureViper(environment) + if err != nil { + panic(err) + } + + setupLogger(environment) + log.Infof("Booting up %s server with commit #%s", environment, os.Getenv("GIT_COMMIT")) + + 
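// Decode the base64-encoded service secrets from configuration: the email
+	// encryption key, the email hashing key, and the JWT signing secret.
+	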
secretEncryptionKey := viper.GetString("key.encryption") + hashingKey := viper.GetString("key.hash") + jwtSecret := viper.GetString("jwt.secret") + + secretEncryptionKeyBytes, err := b64.StdEncoding.DecodeString(secretEncryptionKey) + if err != nil { + log.Fatal("Could not decode email-encryption-key", err) + } + hashingKeyBytes, err := b64.StdEncoding.DecodeString(hashingKey) + if err != nil { + log.Fatal("Could not decode email-hash-key", err) + } + + jwtSecretBytes, err := b64.URLEncoding.DecodeString(jwtSecret) + if err != nil { + log.Fatal("Could not decode jwt-secret ", err) + } + + db := setupDatabase() + defer db.Close() + + sodium.Init() + + hostName, err := os.Hostname() + if err != nil { + log.Fatal("Could not get host name", err) + } + + var latencyLogger = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "museum_method_latency", + Help: "The amount of time each method is taking to respond", + Buckets: []float64{10, 50, 100, 200, 500, 1000, 10000, 30000, 60000, 120000, 600000}, + }, []string{"method"}) + + s3Config := s3config.NewS3Config() + + passkeysRepo, err := passkey.NewRepository(db) + if err != nil { + panic(err) + } + + storagBonusRepo := &storageBonusRepo.Repository{DB: db} + castDb := castRepo.Repository{DB: db} + userRepo := &repo.UserRepository{DB: db, SecretEncryptionKey: secretEncryptionKeyBytes, HashingKey: hashingKeyBytes, StorageBonusRepo: storagBonusRepo, PasskeysRepository: passkeysRepo} + + twoFactorRepo := &repo.TwoFactorRepository{DB: db, SecretEncryptionKey: secretEncryptionKeyBytes} + userAuthRepo := &repo.UserAuthRepository{DB: db} + billingRepo := &repo.BillingRepository{DB: db} + userEntityRepo := &userEntityRepo.Repository{DB: db} + locationTagRepository := &locationtagRepo.Repository{DB: db} + authRepo := &authenticatorRepo.Repository{DB: db} + remoteStoreRepository := &remotestore.Repository{DB: db} + dataCleanupRepository := &datacleanup.Repository{DB: db} + taskLockingRepo := &repo.TaskLockRepository{DB: db} + notificationHistoryRepo := &repo.NotificationHistoryRepository{DB: db} + queueRepo := &repo.QueueRepository{DB: db} + objectRepo := &repo.ObjectRepository{DB: db, QueueRepo: queueRepo} + objectCleanupRepo := &repo.ObjectCleanupRepository{DB: db} + objectCopiesRepo := &repo.ObjectCopiesRepository{DB: db} + usageRepo := &repo.UsageRepository{DB: db, UserRepo: userRepo} + fileRepo := &repo.FileRepository{DB: db, S3Config: s3Config, QueueRepo: queueRepo, + ObjectRepo: objectRepo, ObjectCleanupRepo: objectCleanupRepo, + ObjectCopiesRepo: objectCopiesRepo, UsageRepo: usageRepo} + familyRepo := &repo.FamilyRepository{DB: db} + trashRepo := &repo.TrashRepository{DB: db, ObjectRepo: objectRepo, FileRepo: fileRepo, QueueRepo: queueRepo} + publicCollectionRepo := &repo.PublicCollectionRepository{DB: db} + collectionRepo := &repo.CollectionRepository{DB: db, FileRepo: fileRepo, PublicCollectionRepo: publicCollectionRepo, + TrashRepo: trashRepo, SecretEncryptionKey: secretEncryptionKeyBytes, QueueRepo: queueRepo, LatencyLogger: latencyLogger} + pushRepo := &repo.PushTokenRepository{DB: db} + kexRepo := &kex.Repository{ + DB: db, + } + embeddingRepo := &embedding.Repository{DB: db} + + authCache := cache.New(1*time.Minute, 15*time.Minute) + accessTokenCache := cache.New(1*time.Minute, 15*time.Minute) + discordController := discord.NewDiscordController(userRepo, hostName, environment) + rateLimiter := middleware.NewRateLimitMiddleware(discordController) + + lockController := &lock.LockController{ + TaskLockingRepo: taskLockingRepo, + HostName: 
hostName, + } + emailNotificationCtrl := &email.EmailNotificationController{ + UserRepo: userRepo, + LockController: lockController, + NotificationHistoryRepo: notificationHistoryRepo, + } + + userCache := cache2.NewUserCache() + userCacheCtrl := &usercache.Controller{UserCache: userCache, FileRepo: fileRepo, StoreBonusRepo: storagBonusRepo} + offerController := offer.NewOfferController(*userRepo, discordController, storagBonusRepo, userCacheCtrl) + plans := billing.GetPlans() + defaultPlan := billing.GetDefaultPlans(plans) + stripeClients := billing.GetStripeClients() + commonBillController := commonbilling.NewController(storagBonusRepo, userRepo, usageRepo) + appStoreController := controller.NewAppStoreController(defaultPlan, + billingRepo, fileRepo, userRepo, commonBillController) + + playStoreController := controller.NewPlayStoreController(defaultPlan, + billingRepo, fileRepo, userRepo, storagBonusRepo, commonBillController) + stripeController := controller.NewStripeController(plans, stripeClients, + billingRepo, fileRepo, userRepo, storagBonusRepo, discordController, emailNotificationCtrl, offerController, commonBillController) + billingController := controller.NewBillingController(plans, + appStoreController, playStoreController, stripeController, + discordController, emailNotificationCtrl, + billingRepo, userRepo, usageRepo, storagBonusRepo, commonBillController) + pushController := controller.NewPushController(pushRepo, taskLockingRepo, hostName) + mailingListsController := controller.NewMailingListsController() + + storageBonusCtrl := &storagebonus.Controller{ + UserRepo: userRepo, + StorageBonus: storagBonusRepo, + LockController: lockController, + CronRunning: false, + EmailNotificationController: emailNotificationCtrl, + } + + objectController := &controller.ObjectController{ + S3Config: s3Config, + ObjectRepo: objectRepo, + QueueRepo: queueRepo, + LockController: lockController, + } + objectCleanupController := controller.NewObjectCleanupController( + objectCleanupRepo, + objectRepo, + lockController, + objectController, + s3Config, + ) + + usageController := &controller.UsageController{ + BillingCtrl: billingController, + StorageBonusCtrl: storageBonusCtrl, + UserCacheCtrl: userCacheCtrl, + UsageRepo: usageRepo, + UserRepo: userRepo, + FamilyRepo: familyRepo, + FileRepo: fileRepo, + } + + fileController := &controller.FileController{ + FileRepo: fileRepo, + ObjectRepo: objectRepo, + ObjectCleanupRepo: objectCleanupRepo, + TrashRepository: trashRepo, + UserRepo: userRepo, + UsageCtrl: usageController, + CollectionRepo: collectionRepo, + TaskLockingRepo: taskLockingRepo, + QueueRepo: queueRepo, + ObjectCleanupCtrl: objectCleanupController, + LockController: lockController, + EmailNotificationCtrl: emailNotificationCtrl, + S3Config: s3Config, + HostName: hostName, + } + + replicationController3 := &controller.ReplicationController3{ + S3Config: s3Config, + ObjectRepo: objectRepo, + ObjectCopiesRepo: objectCopiesRepo, + DiscordController: discordController, + } + + trashController := &controller.TrashController{ + TrashRepo: trashRepo, + FileRepo: fileRepo, + CollectionRepo: collectionRepo, + QueueRepo: queueRepo, + TaskLockRepo: taskLockingRepo, + HostName: hostName, + } + + familyController := &family.Controller{ + FamilyRepo: familyRepo, + BillingCtrl: billingController, + UserRepo: userRepo, + UserCacheCtrl: userCacheCtrl, + } + + publicCollectionCtrl := &controller.PublicCollectionController{ + FileController: fileController, + EmailNotificationCtrl: emailNotificationCtrl, + 
PublicCollectionRepo: publicCollectionRepo,
+		CollectionRepo:       collectionRepo,
+		UserRepo:             userRepo,
+		JwtSecret:            jwtSecretBytes,
+	}
+
+	accessCtrl := access.NewAccessController(collectionRepo, fileRepo)
+
+	collectionController := &controller.CollectionController{
+		CollectionRepo:       collectionRepo,
+		AccessCtrl:           accessCtrl,
+		PublicCollectionCtrl: publicCollectionCtrl,
+		UserRepo:             userRepo,
+		FileRepo:             fileRepo,
+		CastRepo:             &castDb,
+		BillingCtrl:          billingController,
+		QueueRepo:            queueRepo,
+		TaskRepo:             taskLockingRepo,
+		LatencyLogger:        latencyLogger,
+	}
+
+	kexCtrl := &kexCtrl.Controller{
+		Repo: kexRepo,
+	}
+
+	userController := user.NewUserController(
+		userRepo,
+		usageRepo,
+		userAuthRepo,
+		twoFactorRepo,
+		passkeysRepo,
+		storagBonusRepo,
+		fileRepo,
+		collectionController,
+		collectionRepo,
+		dataCleanupRepository,
+		billingRepo,
+		secretEncryptionKeyBytes,
+		hashingKeyBytes,
+		authCache,
+		jwtSecretBytes,
+		billingController,
+		familyController,
+		discordController,
+		mailingListsController,
+		pushController,
+		userCache,
+		userCacheCtrl,
+	)
+
+	passkeyCtrl := &controller.PasskeyController{
+		Repo:     passkeysRepo,
+		UserRepo: userRepo,
+	}
+
+	authMiddleware := middleware.AuthMiddleware{UserAuthRepo: userAuthRepo, Cache: authCache, UserController: userController}
+	accessTokenMiddleware := middleware.AccessTokenMiddleware{
+		PublicCollectionRepo: publicCollectionRepo,
+		PublicCollectionCtrl: publicCollectionCtrl,
+		CollectionRepo:       collectionRepo,
+		Cache:                accessTokenCache,
+		BillingCtrl:          billingController,
+		DiscordController:    discordController,
+	}
+
+	if environment != "local" {
+		gin.SetMode(gin.ReleaseMode)
+	}
+	server := gin.New()
+
+	p := ginprometheus.NewPrometheus("museum")
+	p.ReqCntURLLabelMappingFn = urlSanitizer
+	p.Use(server)
+
+	// note: the recover middleware must be the last one in this chain
+	server.Use(requestid.New(), middleware.Logger(urlSanitizer), cors(), gzip.Gzip(gzip.DefaultCompression), middleware.PanicRecover())
+
+	publicAPI := server.Group("/")
+	publicAPI.Use(rateLimiter.APIRateLimitMiddleware(urlSanitizer))
+
+	privateAPI := server.Group("/")
+	privateAPI.Use(authMiddleware.TokenAuthMiddleware(nil), rateLimiter.APIRateLimitForUserMiddleware(urlSanitizer))
+
+	adminAPI := server.Group("/admin")
+	adminAPI.Use(authMiddleware.TokenAuthMiddleware(nil), authMiddleware.AdminAuthMiddleware())
+	paymentJwtAuthAPI := server.Group("/")
+	paymentJwtAuthAPI.Use(authMiddleware.TokenAuthMiddleware(jwt.PAYMENT.Ptr()))
+
+	familiesJwtAuthAPI := server.Group("/")
+	// The middleware order matters: the userID must be set in the context first, so that we can apply per-user rate limits.
+ familiesJwtAuthAPI.Use(authMiddleware.TokenAuthMiddleware(jwt.FAMILIES.Ptr()), rateLimiter.APIRateLimitForUserMiddleware(urlSanitizer)) + + publicCollectionAPI := server.Group("/public-collection") + publicCollectionAPI.Use(accessTokenMiddleware.AccessTokenAuthMiddleware(urlSanitizer)) + + healthCheckHandler := &api.HealthCheckHandler{ + DB: db, + } + publicAPI.GET("/ping", timeout.New( + timeout.WithTimeout(5*time.Second), + timeout.WithHandler(healthCheckHandler.Ping), + timeout.WithResponse(timeOutResponse), + )) + + publicAPI.GET("/fire/db-m-ping", timeout.New( + timeout.WithTimeout(5*time.Second), + timeout.WithHandler(healthCheckHandler.PingDBStats), + timeout.WithResponse(timeOutResponse), + )) + + fileHandler := &api.FileHandler{ + Controller: fileController, + } + privateAPI.GET("/files/upload-urls", fileHandler.GetUploadURLs) + privateAPI.GET("/files/multipart-upload-urls", fileHandler.GetMultipartUploadURLs) + privateAPI.GET("/files/download/:fileID", fileHandler.Get) + privateAPI.GET("/files/download/v2/:fileID", fileHandler.Get) + privateAPI.GET("/files/preview/:fileID", fileHandler.GetThumbnail) + privateAPI.GET("/files/preview/v2/:fileID", fileHandler.GetThumbnail) + privateAPI.POST("/files", fileHandler.CreateOrUpdate) + privateAPI.PUT("/files/update", fileHandler.Update) + privateAPI.POST("/files/trash", fileHandler.Trash) + privateAPI.POST("/files/size", fileHandler.GetSize) + privateAPI.POST("/files/info", fileHandler.GetInfo) + privateAPI.GET("/files/duplicates", fileHandler.GetDuplicates) + privateAPI.GET("/files/large-thumbnails", fileHandler.GetLargeThumbnailFiles) + privateAPI.PUT("/files/thumbnail", fileHandler.UpdateThumbnail) + privateAPI.PUT("/files/magic-metadata", fileHandler.UpdateMagicMetadata) + privateAPI.PUT("/files/public-magic-metadata", fileHandler.UpdatePublicMagicMetadata) + publicAPI.GET("/files/count", fileHandler.GetTotalFileCount) + + kexHandler := &api.KexHandler{ + Controller: kexCtrl, + } + publicAPI.GET("/kex/get", kexHandler.GetKey) + publicAPI.PUT("/kex/add", kexHandler.AddKey) + + trashHandler := &api.TrashHandler{ + Controller: trashController, + } + privateAPI.GET("/trash/diff", trashHandler.GetDiff) + privateAPI.GET("/trash/v2/diff", trashHandler.GetDiffV2) + privateAPI.POST("/trash/delete", trashHandler.Delete) + privateAPI.POST("/trash/empty", trashHandler.Empty) + + userHandler := &api.UserHandler{ + UserController: userController, + } + publicAPI.POST("/users/ott", userHandler.SendOTT) + publicAPI.POST("/users/verify-email", userHandler.VerifyEmail) + publicAPI.POST("/users/two-factor/verify", userHandler.VerifyTwoFactor) + publicAPI.GET("/users/two-factor/recover", userHandler.RecoverTwoFactor) + publicAPI.POST("/users/two-factor/remove", userHandler.RemoveTwoFactor) + publicAPI.POST("/users/two-factor/passkeys/begin", userHandler.BeginPasskeyAuthenticationCeremony) + publicAPI.POST("/users/two-factor/passkeys/finish", userHandler.FinishPasskeyAuthenticationCeremony) + privateAPI.GET("/users/two-factor/status", userHandler.GetTwoFactorStatus) + privateAPI.POST("/users/two-factor/setup", userHandler.SetupTwoFactor) + privateAPI.POST("/users/two-factor/enable", userHandler.EnableTwoFactor) + privateAPI.POST("/users/two-factor/disable", userHandler.DisableTwoFactor) + privateAPI.PUT("/users/attributes", userHandler.SetAttributes) + privateAPI.PUT("/users/email-mfa", userHandler.UpdateEmailMFA) + privateAPI.PUT("/users/keys", userHandler.UpdateKeys) + privateAPI.POST("/users/srp/setup", userHandler.SetupSRP) + 
privateAPI.POST("/users/srp/complete", userHandler.CompleteSRPSetup) + privateAPI.POST("/users/srp/update", userHandler.UpdateSrpAndKeyAttributes) + publicAPI.GET("/users/srp/attributes", userHandler.GetSRPAttributes) + publicAPI.POST("/users/srp/verify-session", userHandler.VerifySRPSession) + publicAPI.POST("/users/srp/create-session", userHandler.CreateSRPSession) + privateAPI.PUT("/users/recovery-key", userHandler.SetRecoveryKey) + privateAPI.GET("/users/public-key", userHandler.GetPublicKey) + privateAPI.GET("/users/feedback", userHandler.GetRoadmapURL) + privateAPI.GET("/users/roadmap", userHandler.GetRoadmapURL) + privateAPI.GET("/users/roadmap/v2", userHandler.GetRoadmapURLV2) + privateAPI.GET("/users/session-validity/v2", userHandler.GetSessionValidityV2) + privateAPI.POST("/users/event", userHandler.ReportEvent) + privateAPI.POST("/users/logout", userHandler.Logout) + privateAPI.GET("/users/payment-token", userHandler.GetPaymentToken) + privateAPI.GET("/users/families-token", userHandler.GetFamiliesToken) + privateAPI.GET("/users/accounts-token", userHandler.GetAccountsToken) + privateAPI.GET("/users/details", userHandler.GetDetails) + privateAPI.GET("/users/details/v2", userHandler.GetDetailsV2) + privateAPI.POST("/users/change-email", userHandler.ChangeEmail) + privateAPI.GET("/users/sessions", userHandler.GetActiveSessions) + privateAPI.DELETE("/users/session", userHandler.TerminateSession) + privateAPI.GET("/users/delete-challenge", userHandler.GetDeleteChallenge) + privateAPI.DELETE("/users/delete", userHandler.DeleteUser) + + accountsJwtAuthAPI := server.Group("/") + accountsJwtAuthAPI.Use(authMiddleware.TokenAuthMiddleware(jwt.ACCOUNTS.Ptr()), rateLimiter.APIRateLimitForUserMiddleware(urlSanitizer)) + passkeysHandler := &api.PasskeyHandler{ + Controller: passkeyCtrl, + } + accountsJwtAuthAPI.GET("/passkeys", passkeysHandler.GetPasskeys) + accountsJwtAuthAPI.PATCH("/passkeys/:passkeyID", passkeysHandler.RenamePasskey) + accountsJwtAuthAPI.DELETE("/passkeys/:passkeyID", passkeysHandler.DeletePasskey) + accountsJwtAuthAPI.GET("/passkeys/registration/begin", passkeysHandler.BeginRegistration) + accountsJwtAuthAPI.POST("/passkeys/registration/finish", passkeysHandler.FinishRegistration) + + collectionHandler := &api.CollectionHandler{ + Controller: collectionController, + } + privateAPI.POST("/collections", collectionHandler.Create) + privateAPI.GET("/collections/:collectionID", collectionHandler.GetCollectionByID) + //lint:ignore SA1019 Deprecated API will be removed in the future + privateAPI.GET("/collections", collectionHandler.Get) + privateAPI.GET("/collections/v2", collectionHandler.GetV2) + privateAPI.POST("/collections/share", collectionHandler.Share) + privateAPI.POST("/collections/share-url", collectionHandler.ShareURL) + privateAPI.PUT("/collections/share-url", collectionHandler.UpdateShareURL) + privateAPI.DELETE("/collections/share-url/:collectionID", collectionHandler.UnShareURL) + privateAPI.POST("/collections/unshare", collectionHandler.UnShare) + privateAPI.POST("/collections/leave/:collectionID", collectionHandler.Leave) + privateAPI.POST("/collections/add-files", collectionHandler.AddFiles) + privateAPI.POST("/collections/move-files", collectionHandler.MoveFiles) + privateAPI.POST("/collections/restore-files", collectionHandler.RestoreFiles) + + privateAPI.POST("/collections/v3/remove-files", collectionHandler.RemoveFilesV3) + privateAPI.GET("/collections/v2/diff", collectionHandler.GetDiffV2) + privateAPI.GET("/collections/file", collectionHandler.GetFile) 
+ privateAPI.GET("/collections/sharees", collectionHandler.GetSharees) + privateAPI.DELETE("/collections/v2/:collectionID", collectionHandler.Trash) + privateAPI.DELETE("/collections/v3/:collectionID", collectionHandler.TrashV3) + privateAPI.POST("/collections/rename", collectionHandler.Rename) + privateAPI.PUT("/collections/magic-metadata", collectionHandler.PrivateMagicMetadataUpdate) + privateAPI.PUT("/collections/public-magic-metadata", collectionHandler.PublicMagicMetadataUpdate) + privateAPI.PUT("/collections/sharee-magic-metadata", collectionHandler.ShareeMagicMetadataUpdate) + + publicCollectionHandler := &api.PublicCollectionHandler{ + Controller: publicCollectionCtrl, + FileCtrl: fileController, + CollectionCtrl: collectionController, + StorageBonusController: storageBonusCtrl, + } + + publicCollectionAPI.GET("/files/preview/:fileID", publicCollectionHandler.GetThumbnail) + publicCollectionAPI.GET("/files/download/:fileID", publicCollectionHandler.GetFile) + publicCollectionAPI.GET("/diff", publicCollectionHandler.GetDiff) + publicCollectionAPI.GET("/info", publicCollectionHandler.GetCollection) + publicCollectionAPI.GET("/upload-urls", publicCollectionHandler.GetUploadUrls) + publicCollectionAPI.GET("/multipart-upload-urls", publicCollectionHandler.GetMultipartUploadURLs) + publicCollectionAPI.POST("/file", publicCollectionHandler.CreateFile) + publicCollectionAPI.POST("/verify-password", publicCollectionHandler.VerifyPassword) + publicCollectionAPI.POST("/report-abuse", publicCollectionHandler.ReportAbuse) + + castAPI := server.Group("/cast") + + castCtrl := cast.NewController(&castDb, accessCtrl) + castMiddleware := middleware.CastMiddleware{CastCtrl: castCtrl, Cache: authCache} + castAPI.Use(castMiddleware.CastAuthMiddleware()) + + castHandler := &api.CastHandler{ + CollectionCtrl: collectionController, + FileCtrl: fileController, + Ctrl: castCtrl, + } + + publicAPI.POST("/cast/device-info/", castHandler.RegisterDevice) + privateAPI.GET("/cast/device-info/:deviceCode", castHandler.GetDeviceInfo) + publicAPI.GET("/cast/cast-data/:deviceCode", castHandler.GetCastData) + privateAPI.POST("/cast/cast-data/", castHandler.InsertCastData) + privateAPI.DELETE("/cast/revoke-all-tokens/", castHandler.RevokeAllToken) + + castAPI.GET("/files/preview/:fileID", castHandler.GetThumbnail) + castAPI.GET("/files/download/:fileID", castHandler.GetFile) + castAPI.GET("/diff", castHandler.GetDiff) + castAPI.GET("/info", castHandler.GetCollection) + familyHandler := &api.FamilyHandler{ + Controller: familyController, + } + + publicAPI.GET("/family/invite-info/:token", familyHandler.GetInviteInfo) + publicAPI.POST("/family/accept-invite", familyHandler.AcceptInvite) + + privateAPI.DELETE("/family/leave", familyHandler.Leave) // native/web app + + familiesJwtAuthAPI.POST("/family/create", familyHandler.CreateFamily) + familiesJwtAuthAPI.POST("/family/add-member", familyHandler.InviteMember) + familiesJwtAuthAPI.GET("/family/members", familyHandler.FetchMembers) + familiesJwtAuthAPI.DELETE("/family/remove-member/:id", familyHandler.RemoveMember) + familiesJwtAuthAPI.DELETE("/family/revoke-invite/:id", familyHandler.RevokeInvite) + + billingHandler := &api.BillingHandler{ + Controller: billingController, + AppStoreController: appStoreController, + PlayStoreController: playStoreController, + StripeController: stripeController, + } + publicAPI.GET("/billing/plans/v2", billingHandler.GetPlansV2) + privateAPI.GET("/billing/user-plans", billingHandler.GetUserPlans) + privateAPI.GET("/billing/usage", 
billingHandler.GetUsage) + privateAPI.GET("/billing/subscription", billingHandler.GetSubscription) + privateAPI.POST("/billing/verify-subscription", billingHandler.VerifySubscription) + publicAPI.POST("/billing/notify/android", billingHandler.AndroidNotificationHandler) + publicAPI.POST("/billing/notify/ios", billingHandler.IOSNotificationHandler) + publicAPI.POST("/billing/notify/stripe", billingHandler.StripeINNotificationHandler) + // after the StripeIN customers are completely migrated, we can change notify/stripe/us to notify/stripe and deprecate this endpoint + publicAPI.POST("/billing/notify/stripe/us", billingHandler.StripeUSNotificationHandler) + privateAPI.GET("/billing/stripe/customer-portal", billingHandler.GetStripeCustomerPortal) + privateAPI.POST("/billing/stripe/cancel-subscription", billingHandler.StripeCancelSubscription) + privateAPI.POST("/billing/stripe/activate-subscription", billingHandler.StripeActivateSubscription) + paymentJwtAuthAPI.GET("/billing/stripe-account-country", billingHandler.GetStripeAccountCountry) + paymentJwtAuthAPI.GET("/billing/stripe/checkout-session", billingHandler.GetCheckoutSession) + paymentJwtAuthAPI.POST("/billing/stripe/update-subscription", billingHandler.StripeUpdateSubscription) + + storageBonusHandler := &api.StorageBonusHandler{ + Controller: storageBonusCtrl, + } + + privateAPI.GET("/storage-bonus/details", storageBonusHandler.GetStorageBonusDetails) + privateAPI.GET("/storage-bonus/referral-view", storageBonusHandler.GetReferralView) + privateAPI.POST("/storage-bonus/referral-claim", storageBonusHandler.ClaimReferral) + + adminHandler := &api.AdminHandler{ + UserRepo: userRepo, + CollectionRepo: collectionRepo, + UserAuthRepo: userAuthRepo, + UserController: userController, + FamilyController: familyController, + FileRepo: fileRepo, + StorageBonusRepo: storagBonusRepo, + BillingRepo: billingRepo, + BillingController: billingController, + ObjectCleanupController: objectCleanupController, + MailingListsController: mailingListsController, + DiscordController: discordController, + HashingKey: hashingKeyBytes, + PasskeyController: passkeyCtrl, + } + adminAPI.POST("/mail", adminHandler.SendMail) + adminAPI.POST("/mail/subscribe", adminHandler.SubscribeMail) + adminAPI.POST("/mail/unsubscribe", adminHandler.UnsubscribeMail) + adminAPI.GET("/users", adminHandler.GetUsers) + adminAPI.GET("/user", adminHandler.GetUser) + adminAPI.POST("/user/disable-2fa", adminHandler.DisableTwoFactor) + adminAPI.POST("/user/disable-passkeys", adminHandler.RemovePasskeys) + adminAPI.POST("/user/close-family", adminHandler.CloseFamily) + adminAPI.DELETE("/user/delete", adminHandler.DeleteUser) + adminAPI.POST("/user/recover", adminHandler.RecoverAccount) + adminAPI.GET("/email-hash", adminHandler.GetEmailHash) + adminAPI.POST("/emails-from-hashes", adminHandler.GetEmailsFromHashes) + adminAPI.PUT("/user/subscription", adminHandler.UpdateSubscription) + adminAPI.POST("/user/bf-2013", adminHandler.UpdateBFDeal) + adminAPI.POST("/job/clear-orphan-objects", adminHandler.ClearOrphanObjects) + + userEntityController := &userEntityCtrl.Controller{Repo: userEntityRepo} + userEntityHandler := &api.UserEntityHandler{Controller: userEntityController} + + privateAPI.POST("/user-entity/key", userEntityHandler.CreateKey) + privateAPI.GET("/user-entity/key", userEntityHandler.GetKey) + privateAPI.POST("/user-entity/entity", userEntityHandler.CreateEntity) + privateAPI.PUT("/user-entity/entity", userEntityHandler.UpdateEntity) + privateAPI.DELETE("/user-entity/entity", 
userEntityHandler.DeleteEntity) + privateAPI.GET("/user-entity/entity/diff", userEntityHandler.GetDiff) + + locationTagController := &locationtag.Controller{Repo: locationTagRepository} + locationTagHandler := &api.LocationTagHandler{Controller: locationTagController} + privateAPI.POST("/locationtag/create", locationTagHandler.Create) + privateAPI.POST("/locationtag/update", locationTagHandler.Update) + privateAPI.DELETE("/locationtag/delete", locationTagHandler.Delete) + privateAPI.GET("/locationtag/diff", locationTagHandler.GetDiff) + + authenticatorController := &authenticatorCtrl.Controller{Repo: authRepo} + authenticatorHandler := &api.AuthenticatorHandler{Controller: authenticatorController} + + privateAPI.POST("/authenticator/key", authenticatorHandler.CreateKey) + privateAPI.GET("/authenticator/key", authenticatorHandler.GetKey) + privateAPI.POST("/authenticator/entity", authenticatorHandler.CreateEntity) + privateAPI.PUT("/authenticator/entity", authenticatorHandler.UpdateEntity) + privateAPI.DELETE("/authenticator/entity", authenticatorHandler.DeleteEntity) + privateAPI.GET("/authenticator/entity/diff", authenticatorHandler.GetDiff) + + remoteStoreController := &remoteStoreCtrl.Controller{Repo: remoteStoreRepository} + dataCleanupController := &dataCleanupCtrl.DeleteUserCleanupController{ + Repo: dataCleanupRepository, + UserRepo: userRepo, + CollectionRepo: collectionRepo, + TaskLockRepo: taskLockingRepo, + TrashRepo: trashRepo, + UsageRepo: usageRepo, + HostName: hostName, + } + remoteStoreHandler := &api.RemoteStoreHandler{Controller: remoteStoreController} + + privateAPI.POST("/remote-store/update", remoteStoreHandler.InsertOrUpdate) + privateAPI.GET("/remote-store", remoteStoreHandler.GetKey) + + pushHandler := &api.PushHandler{PushController: pushController} + privateAPI.POST("/push/token", pushHandler.AddToken) + + embeddingController := &embeddingCtrl.Controller{Repo: embeddingRepo, AccessCtrl: accessCtrl, ObjectCleanupController: objectCleanupController, S3Config: s3Config, FileRepo: fileRepo, CollectionRepo: collectionRepo, QueueRepo: queueRepo, TaskLockingRepo: taskLockingRepo, HostName: hostName} + embeddingHandler := &api.EmbeddingHandler{Controller: embeddingController} + + privateAPI.PUT("/embeddings", embeddingHandler.InsertOrUpdate) + privateAPI.GET("/embeddings/diff", embeddingHandler.GetDiff) + privateAPI.DELETE("/embeddings", embeddingHandler.DeleteAll) + + offerHandler := &api.OfferHandler{Controller: offerController} + publicAPI.GET("/offers/black-friday", offerHandler.GetBlackFridayOffers) + + setKnownAPIs(server.Routes()) + + setupAndStartBackgroundJobs(objectCleanupController, replicationController3) + setupAndStartCrons( + userAuthRepo, publicCollectionRepo, twoFactorRepo, passkeysRepo, fileController, taskLockingRepo, emailNotificationCtrl, + trashController, pushController, objectController, dataCleanupController, storageBonusCtrl, + embeddingController, healthCheckHandler, kexCtrl, castDb) + + // Create a new collector, the name will be used as a label on the metrics + collector := sqlstats.NewStatsCollector("prod_db", db) + // Register it with Prometheus + prometheus.MustRegister(collector) + + http.Handle("/metrics", promhttp.Handler()) + go http.ListenAndServe(":2112", nil) + go runServer(environment, server) + discordController.NotifyStartup() + log.Println("We have lift-off.") + + quit := make(chan os.Signal, 1) + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + <-quit + log.Println("Shutting down server...") + 
discordController.NotifyShutdown() +} + +func runServer(environment string, server *gin.Engine) { + if environment == "local" { + server.Run(":8080") + } else { + certPath, err := config.CredentialFilePath("tls.cert") + if err != nil { + log.Fatal(err) + } + + keyPath, err := config.CredentialFilePath("tls.key") + if err != nil { + log.Fatal(err) + } + + log.Fatal(server.RunTLS(":443", certPath, keyPath)) + } +} + +func setupLogger(environment string) { + log.SetReportCaller(true) + callerPrettyfier := func(f *runtime.Frame) (string, string) { + s := strings.Split(f.Function, ".") + funcName := s[len(s)-1] + return funcName, fmt.Sprintf("%s:%d", path.Base(f.File), f.Line) + } + logFile := viper.GetString("log-file") + if environment == "local" && logFile == "" { + log.SetFormatter(&log.TextFormatter{ + CallerPrettyfier: callerPrettyfier, + DisableQuote: true, + ForceColors: true, + }) + } else { + log.SetFormatter(&log.JSONFormatter{ + CallerPrettyfier: callerPrettyfier, + PrettyPrint: false, + }) + log.SetOutput(&lumberjack.Logger{ + Filename: logFile, + MaxSize: 100, + MaxAge: 30, + Compress: true, + }) + } +} + +func setupDatabase() *sql.DB { + log.Println("Setting up db") + db, err := sql.Open("postgres", config.GetPGInfo()) + + if err != nil { + log.Panic(err) + panic(err) + } + log.Println("Connected to DB") + err = db.Ping() + if err != nil { + panic(err) + } + log.Println("Pinged DB") + + driver, _ := postgres.WithInstance(db, &postgres.Config{}) + m, err := migrate.NewWithDatabaseInstance( + "file://migrations", "postgres", driver) + if err != nil { + log.Panic(err) + panic(err) + } + log.Println("Loaded migration scripts") + if err := m.Up(); err != nil && err != migrate.ErrNoChange { + log.Panic(err) + panic(err) + } + + db.SetMaxIdleConns(6) + db.SetMaxOpenConns(30) + + log.Println("Database was configured successfully.") + + return db +} + +func setupAndStartBackgroundJobs( + objectCleanupController *controller.ObjectCleanupController, + replicationController3 *controller.ReplicationController3, +) { + isReplicationEnabled := viper.GetBool("replication.enabled") + if isReplicationEnabled { + err := replicationController3.StartReplication() + if err != nil { + log.Warnf("Could not start replication v3: %s", err) + } + } else { + log.Info("Skipping Replication as replication is disabled") + } + + objectCleanupController.StartRemovingUnreportedObjects() + objectCleanupController.StartClearingOrphanObjects() +} + +func setupAndStartCrons(userAuthRepo *repo.UserAuthRepository, publicCollectionRepo *repo.PublicCollectionRepository, + twoFactorRepo *repo.TwoFactorRepository, passkeysRepo *passkey.Repository, fileController *controller.FileController, + taskRepo *repo.TaskLockRepository, emailNotificationCtrl *email.EmailNotificationController, + trashController *controller.TrashController, pushController *controller.PushController, + objectController *controller.ObjectController, + dataCleanupCtrl *dataCleanupCtrl.DeleteUserCleanupController, + storageBonusCtrl *storagebonus.Controller, + embeddingCtrl *embeddingCtrl.Controller, + healthCheckHandler *api.HealthCheckHandler, + kexCtrl *kexCtrl.Controller, + castDb castRepo.Repository) { + shouldSkipCron := viper.GetBool("jobs.cron.skip") + if shouldSkipCron { + log.Info("Skipping cron jobs") + return + } + + c := cron.New() + schedule(c, "@every 1m", func() { + _ = userAuthRepo.RemoveExpiredOTTs() + }) + + schedule(c, "@every 24h", func() { + _ = userAuthRepo.RemoveDeletedTokens(timeUtil.MicrosecondBeforeDays(30)) + _ = 
castDb.DeleteOldCodes(context.Background(), timeUtil.MicrosecondBeforeDays(1)) + _ = publicCollectionRepo.CleanupAccessHistory(context.Background()) + }) + + schedule(c, "@every 1m", func() { + _ = twoFactorRepo.RemoveExpiredTwoFactorSessions() + }) + schedule(c, "@every 1m", func() { + _ = twoFactorRepo.RemoveExpiredTempTwoFactorSecrets() + }) + schedule(c, "@every 1m", func() { + _ = passkeysRepo.RemoveExpiredPasskeySessions() + }) + schedule(c, "@every 1m", func() { + healthCheckHandler.PerformHealthCheck() + }) + + scheduleAndRun(c, "@every 60m", func() { + err := taskRepo.CleanupExpiredLocks() + if err != nil { + log.Printf("Error while cleaning up lock table, %s", err) + } + }) + + schedule(c, "@every 193s", func() { + fileController.CleanupDeletedFiles() + }) + schedule(c, "@every 101s", func() { + embeddingCtrl.CleanupDeletedEmbeddings() + }) + + schedule(c, "@every 120s", func() { + trashController.DropFileMetadataCron() + }) + + schedule(c, "@every 2m", func() { + objectController.RemoveComplianceHolds() + }) + + schedule(c, "@every 1m", func() { + trashController.CleanupTrashedCollections() + }) + + // 101s to avoid running too many cron at same time + schedule(c, "@every 101s", func() { + trashController.DeleteAgedTrashedFiles() + }) + + schedule(c, "@every 63s", func() { + storageBonusCtrl.PaymentUpgradeOrDowngradeCron() + }) + + // 67s to avoid running too many cron at same time + schedule(c, "@every 67s", func() { + trashController.ProcessEmptyTrashRequests() + }) + + schedule(c, "@every 30m", func() { + dataCleanupCtrl.DeleteDataCron() + }) + + schedule(c, "@every 24h", func() { + emailNotificationCtrl.SendStorageLimitExceededMails() + }) + + schedule(c, "@every 1m", func() { + pushController.SendPushes() + }) + + schedule(c, "@every 24h", func() { + pushController.ClearExpiredTokens() + }) + + scheduleAndRun(c, "@every 60m", func() { + kexCtrl.DeleteOldKeys() + }) + + c.Start() +} + +func cors() gin.HandlerFunc { + return func(c *gin.Context) { + c.Writer.Header().Set("Access-Control-Allow-Origin", c.GetHeader("Origin")) + c.Writer.Header().Set("Access-Control-Allow-Credentials", "true") + c.Writer.Header().Set("Access-Control-Allow-Headers", "Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, X-Auth-Token, X-Auth-Access-Token, X-Cast-Access-Token, X-Auth-Access-Token-JWT, X-Client-Package, X-Client-Version, Authorization, accept, origin, Cache-Control, X-Requested-With, upgrade-insecure-requests") + c.Writer.Header().Set("Access-Control-Expose-Headers", "X-Request-Id") + c.Writer.Header().Set("Access-Control-Allow-Methods", "POST, OPTIONS, GET, PUT, PATCH, DELETE") + c.Writer.Header().Set("Access-Control-Max-Age", "1728000") + + if c.Request.Method == http.MethodOptions { + c.AbortWithStatus(http.StatusNoContent) + return + } + c.Next() + } +} + +var knownAPIs = make(map[string]bool) + +func urlSanitizer(c *gin.Context) string { + if c.Request.Method == http.MethodOptions { + return "/options" + } + u := *c.Request.URL + u.RawQuery = "" + uri := u.RequestURI() + for _, p := range c.Params { + uri = strings.Replace(uri, p.Value, fmt.Sprintf(":%s", p.Key), 1) + } + if !knownAPIs[uri] { + log.Warn("Unknown API: " + uri) + return "/unknown-api" + } + return uri +} + +func timeOutResponse(c *gin.Context) { + c.JSON(http.StatusRequestTimeout, gin.H{"handler": true}) +} + +func setKnownAPIs(routes []gin.RouteInfo) { + for _, route := range routes { + knownAPIs[route.Path] = true + } +} + +// Schedule a cron job +func schedule(c *cron.Cron, spec string, cmd func()) 
(cron.EntryID, error) { + return c.AddFunc(spec, cmd) +} + +// Schedule a cron job, and run it once immediately too. +func scheduleAndRun(c *cron.Cron, spec string, cmd func()) (cron.EntryID, error) { + go cmd() + return schedule(c, spec, cmd) +} diff --git a/server/compose.yaml b/server/compose.yaml new file mode 100644 index 000000000..6972fc364 --- /dev/null +++ b/server/compose.yaml @@ -0,0 +1,93 @@ +services: + museum: + build: + context: . + args: + GIT_COMMIT: development-cluster + ports: + - 8080:8080 # API + - 2112:2112 # Prometheus metrics + depends_on: + postgres: + condition: service_healthy + environment: + # Pass in the config to connect to the DB and MinIO + ENTE_CREDENTIALS_FILE: /credentials.yaml + volumes: + - custom-logs:/var/logs + - ./museum.yaml:/museum.yaml:ro + - ./scripts/compose/credentials.yaml:/credentials.yaml:ro + networks: + - internal + + # Resolve "localhost:3200" in the museum container to the minio container. + socat: + image: alpine/socat + network_mode: service:museum + depends_on: + - museum + command: "TCP-LISTEN:3200,fork,reuseaddr TCP:minio:3200" + + postgres: + image: postgres:12 + ports: + - 5432:5432 + environment: + POSTGRES_USER: pguser + POSTGRES_PASSWORD: pgpass + POSTGRES_DB: ente_db + # Wait for postgres to accept connections before starting museum. + healthcheck: + test: + [ + "CMD", + "pg_isready", + "-q", + "-d", + "ente_db", + "-U", + "pguser" + ] + interval: 1s + timeout: 5s + retries: 20 + volumes: + - postgres-data:/var/lib/postgresql/data + networks: + - internal + + minio: + image: minio/minio + # Use different ports than the minio defaults to avoid conflicting + # with the ports used by Prometheus. + ports: + - 3200:3200 # API + - 3201:3201 # Console + environment: + MINIO_ROOT_USER: test + MINIO_ROOT_PASSWORD: testtest + command: server /data --address ":3200" --console-address ":3201" + volumes: + - minio-data:/data + networks: + - internal + + minio-provision: + image: minio/mc + depends_on: + - minio + volumes: + - ./scripts/compose/minio-provision.sh:/provision.sh:ro + - minio-data:/data + networks: + - internal + entrypoint: sh /provision.sh + +volumes: + custom-logs: + postgres-data: + minio-data: + + +networks: + internal: diff --git a/server/configurations/local.yaml b/server/configurations/local.yaml new file mode 100644 index 000000000..47d6f0d37 --- /dev/null +++ b/server/configurations/local.yaml @@ -0,0 +1,264 @@ +# Configuring museum +# ------------------ +# +# 1. If the environment variable `ENVIRONMENT` is specified, then it is used to +# load one of the files from the `configurations/` directory. If not present, +# then by default `local.yaml` (this file) will get loaded. +# +# 2. Then, museum will look for a file named `museum.yaml` in the current +# working directory. If found, this file will also be loaded, and entries +# specified therein will override the defaults specified here. +# +# 3. If the "credentials-file" config option is set, then museum will also load +# that and merge it in. +# +# 4. Config can be overridden via environment variables (details below). +# +# Environment variables +# --------------------- +# +# All configuration options can be overridden via environment variables. The +# environment variable should have the prefix "ENTE_", and any nesting should be +# replaced by underscores. +# +# For example, the nested string "db.user" in the config file can alternatively +# be specified (or be overridden) by setting an environment variable named +# ENTE_DB_USER.
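+# +# As an illustrative invocation only (it assumes a locally built museum binary +# in the CWD; the credential values are placeholders), the DB user and password +# could be supplied entirely via the environment: +# +# ENTE_DB_USER=pguser ENTE_DB_PASSWORD=pgpass ./museum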
+# +# +# Empty strings +# ------------- +# +# The empty string indicates missing values (to match Go convention). +# +# This also means that to override a value that is specified in local.yaml in a +# subsequently loaded config file, you should specify the key as an empty string +# (`key: ""`) instead of leaving it unset. +# +# --- + +# If this option is specified, then it is loaded and gets merged-in over the +# defaults present in default.yaml. This provides a way to inject credentials +# and other overrides. +# +# The default is to look for a file named credentials.yaml in the CWD. +#credentials-file: credentials.yaml + +# Some credentials (e.g. the TLS cert) are cumbersome to provide inline in the +# YAML configuration file, thus these are loaded at runtime from separate files. +# +# This is the directory where museum should look for them. +# +# Currently, the following files are loaded (if needed) +# +# - credentials/{tls.cert,tls.key} +# - credentials/pst-service-account.json +# - credentials/fcm-service-account.json +# +# The default is to look for these files in a directory named credentials +# under the CWD. +#credentials-dir: credentials + +# By default, museum logs to stdout when running locally. Specify this path to +# get it to log to a file instead. +# +# It must be specified if running in a non-local environment. +log-file: "" + +# Database connection parameters +db: + host: localhost + port: 5432 + name: ente_db + # These can be specified here, or alternatively provided via the environment + # as ENTE_DB_USER and ENTE_DB_PASSWORD. + user: + password: + +# Map of data centers +# +# Each data center also specifies which bucket in that provider should be used. +s3: + # Override the primary and secondary hot storage. The commented out values + # are the defaults. + # + #hot_storage: + # primary: b2-eu-cen + # secondary: wasabi-eu-central-2-v3 + b2-eu-cen: + key: + secret: + endpoint: + region: + bucket: + wasabi-eu-central-2-v3: + key: + secret: + endpoint: + region: + bucket: + # If enabled, this causes us to opt the object out of the compliance + # lock when the object is deleted. See "Wasabi Compliance". + # + # Currently this flag is only honoured for the Wasabi v3 bucket. + compliance: true + scw-eu-fr-v3: + key: + secret: + endpoint: + region: + bucket: + # If true, enable some workarounds to allow us to use a local minio instance + # for object storage. + # + # 1. Disable SSL. + # + # 2. Use "path" style S3 URLs where the bucket is part of the URL path, e.g. + # http://localhost:3200/b2-eu-cen. By default the bucket name is part of + # the (sub)domain, e.g. http://b2-eu-cen.localhost:3200/ and cannot be + # resolved when running locally. + # + # 3. Directly download the file during replication instead of going via the + # Cloudflare worker. + # + # 4. Do not specify storage classes when uploading objects (since minio does + # not support them, specifically it doesn't support GLACIER). + # + #are_local_buckets: true
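+ +# As an illustrative sketch (these exact values are assumptions, not shipped +# defaults): when running against the local minio from compose.yaml, a +# museum.yaml override could look like the following. The key/secret mirror +# MINIO_ROOT_USER/MINIO_ROOT_PASSWORD from the compose file, and the endpoint +# relies on the socat forward of localhost:3200; the region and bucket names +# are placeholders: +# +# s3: +# are_local_buckets: true +# b2-eu-cen: +# key: test +# secret: testtest +# endpoint: localhost:3200 +# region: eu-central-2 +# bucket: b2-eu-cen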
+ +# Key used for encrypting customer emails before storing them in DB +# +# To make it easy to get started, some randomly generated values are provided +# here. But if you're really going to be using museum, please generate new keys. +# You can use `go run tools/gen-random-keys/main.go` for that. +key: + encryption: yvmG/RnzKrbCb9L3mgsmoxXr9H7i2Z4qlbT0mL3ln4w= + hash: KXYiG07wC7GIgvCSdg+WmyWdXDAn6XKYJtp/wkEU7x573+byBRAYtpTP0wwvi8i/4l37uicX1dVTUzwH3sLZyw== + +# JWT secrets +# +# To make it easy to get started, a randomly generated value is provided here. +# But if you're really going to be using museum, please generate new keys. You +# can use `go run tools/gen-random-keys/main.go` for that. +jwt: + secret: i2DecQmfGreG6q1vBj5tCokhlN41gcfS2cjOs9Po-u8= + +# Zoho Zeptomail config (optional) +# Use case: Sending emails +transmail: + # Transmail token + # Mail agent: dev + key: + +# Apple config (optional) +# Use case: In-app purchases +apple: + # Secret used when communicating with Apple for validating IAP receipts. + shared-secret: + +# Stripe config (optional) +# Use case: Payments +stripe: + us: + key: + webhook-secret: + in: + key: + webhook-secret: + whitelisted-redirect-urls: [] + path: + success: ?status=success&session_id={CHECKOUT_SESSION_ID} + cancel: ?status=fail&reason=canceled + +# Passkey support (WIP) +webauthn: + rpid: "example.com" + rporigins: + - "https://example.com:3005" + +# Roadmap SSO (optional) +# +# Allow the user to sign into a hosted roadmap service using their ente.io +# credentials. Here we can configure the URL prefix and service-level +# credentials for SSO. +roadmap: + # The prefix of the URL the user should be redirected to + url-prefix: + # This secret can be obtained from the roadmap dashboard + sso-secret: + +# Discord config (optional) +# Use case: Devops +discord: + bot: + cha-ching: + token: + channel: + mona-lisa: + token: + channel: + +# Zoho Campaigns config (optional) +# Use case: Sending emails +zoho: + client-id: + client-secret: + refresh-token: + list-key: + topic-ids: + +# Various low-level configuration options +internal: + # If false (the default), then museum will notify the external world of + # various events. E.g., email users about their storage being full, send + # alerts to Discord, etc. + # + # It can be set to true when running a "read only" instance like a backup + # restoration test, where we want to be able to access data but otherwise + # minimize external side effects. + silent: false + # If provided, this external healthcheck url is periodically pinged. + health-check-url: + # Hardcoded verification codes, useful for logging in when developing. + hardcoded-ott: + emails: + - "example@example.org,123456" + # When running in a local environment, hardcode the verification code to + # 123456 for email addresses ending with @example.org + local-domain-suffix: "@example.org" + local-domain-value: 123456 + # List of user IDs that can use the admin API endpoints. + admins: [] + +# Replication config +# +# If enabled, replicate each file to 2 other data centers after it gets +# successfully uploaded to the primary hot storage. +replication: + enabled: false + # The Cloudflare worker to use to download files from the primary hot + # bucket. Must be specified if replication is enabled. + worker-url: + # Number of goroutines to spawn for replication + # This is not related to the worker-url above. + # Optional, default value is indicated here. + worker-count: 6 + # Where to store temporary objects during replication v3 + # Optional, default value is indicated here. + tmp-storage: tmp/replication + +# Configuration for various background / cron jobs. +jobs: + cron: + # Instances run various cleanup, email-sending, and other cron jobs. Use + # this flag to disable all these cron jobs. + skip: false + remove-unreported-objects: + # Number of goroutines to spawn for object cleanup + # Optional, default value is indicated here. + worker-count: 1 + clear-orphan-objects: + # By default, this job is disabled.
+ enabled: false + # If provided, only objects that begin with this prefix are pruned. + prefix: "" diff --git a/server/configurations/production.yaml b/server/configurations/production.yaml new file mode 100644 index 000000000..6c7c20b81 --- /dev/null +++ b/server/configurations/production.yaml @@ -0,0 +1,6 @@ +log-file: /var/logs/museum.log + +stripe: + path: + success: ?status=success&session_id={CHECKOUT_SESSION_ID} + cancel: ?status=fail&reason=canceled diff --git a/server/ente/access.go b/server/ente/access.go new file mode 100644 index 000000000..b7c11023d --- /dev/null +++ b/server/ente/access.go @@ -0,0 +1,38 @@ +package ente + +type CollectionParticipantRole string + +const ( + VIEWER CollectionParticipantRole = "VIEWER" + OWNER CollectionParticipantRole = "OWNER" + COLLABORATOR CollectionParticipantRole = "COLLABORATOR" + UNKNOWN CollectionParticipantRole = "UNKNOWN" +) + +func (c *CollectionParticipantRole) CanAdd() bool { + if c == nil { + return false + } + return *c == OWNER || *c == COLLABORATOR +} + +// CanRemoveAny indicates if the role allows the user to remove files added by others too +func (c *CollectionParticipantRole) CanRemoveAny() bool { + if c == nil { + return false + } + return *c == OWNER +} + +func ConvertStringToCollectionParticipantRole(value string) CollectionParticipantRole { + switch value { + case "VIEWER": + return VIEWER + case "OWNER": + return OWNER + case "COLLABORATOR": + return COLLABORATOR + default: + return UNKNOWN + } +} diff --git a/server/ente/admin.go b/server/ente/admin.go new file mode 100644 index 000000000..17290da14 --- /dev/null +++ b/server/ente/admin.go @@ -0,0 +1,99 @@ +package ente + +import ( + "errors" + "fmt" +) + +// GetEmailsFromHashesRequest represents a request to convert email hashes back into email addresses +type GetEmailsFromHashesRequest struct { + Hashes []string `json:"hashes"` +} + +// Admin API request to disable 2FA for a user account. +// +// This is used when we get a user request to reset their 2FA when they might've +// lost access to their 2FA codes. We verify their identity out of band. +type DisableTwoFactorRequest struct { + UserID int64 `json:"userID" binding:"required"` +} + +type AdminOpsForUserRequest struct { + UserID int64 `json:"userID" binding:"required"` +} + +// RecoverAccountRequest is used to recover accounts which are in a soft-delete state.
+type RecoverAccountRequest struct { + UserID int64 `json:"userID" binding:"required"` + EmailID string `json:"emailID" binding:"required"` +} + +// UpdateSubscriptionRequest is used to update a user's subscription +type UpdateSubscriptionRequest struct { + AdminID int64 `json:"-"` + UserID int64 `json:"userID" binding:"required"` + Storage int64 `json:"storage" binding:"required"` + PaymentProvider PaymentProvider `json:"paymentProvider"` + TransactionID string `json:"transactionID" binding:"required"` + ProductID string `json:"productID" binding:"required"` + ExpiryTime int64 `json:"expiryTime" binding:"required"` + Attributes SubscriptionAttributes `json:"attributes"` +} + +type AddOnAction string + +const ( + ADD AddOnAction = "ADD" + REMOVE AddOnAction = "REMOVE" + UPDATE AddOnAction = "UPDATE" +) + +type UpdateBlackFridayDeal struct { + Action AddOnAction `json:"action" binding:"required"` + UserID int64 `json:"userID" binding:"required"` + Year int `json:"year"` + StorageInGB int64 `json:"storageInGB"` + Testing bool `json:"testing"` + StorageInMB int64 `json:"storageInMB"` + Minute int64 `json:"minute"` +} + +func (u UpdateBlackFridayDeal) UpdateLog() string { + if u.Testing { + return fmt.Sprintf("BF_UPDATE_TESTING: %s, storageInMB: %d, minute: %d", u.Action, u.StorageInMB, u.Minute) + } else { + return fmt.Sprintf("BF_UPDATE: %s, storageInGB: %d, year: %d", u.Action, u.StorageInGB, u.Year) + } +} + +func (u UpdateBlackFridayDeal) Validate() error { + if u.Action == ADD || u.Action == UPDATE { + if u.Testing { + if u.StorageInMB == 0 && u.Minute == 0 { + return errors.New("invalid input, set in MB and minute for test") + } + } else { + if u.StorageInGB != 100 && u.StorageInGB != 2000 && u.StorageInGB != 500 { + return errors.New("invalid input for deal, only 100, 500, 2000 allowed") + } + if u.Year != 3 && u.Year != 5 { + return errors.New("invalid input for year, only 3 or 5") + } + } + } + return nil +} + +// ClearOrphanObjectsRequest is the API request to trigger the process for +// clearing orphan objects in DC. +// +// The optional prefix can be specified to limit the cleanup to objects that +// begin with that prefix. +// +// ForceTaskLock can be used to force the cleanup to start even if there is an +// existing task lock for the clear orphan objects task. 
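+// +// Illustrative request sketch only (the host and the admin group's mount path +// below are placeholder assumptions; the DC must be one of the configured data +// centers, e.g. b2-eu-cen, and X-Auth-Token carries an admin user's token): +// +// curl -X POST https://museum.example.org/admin/job/clear-orphan-objects \ +// -H 'X-Auth-Token: <admin-token>' \ +// -d '{"dc": "b2-eu-cen"}'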
+type ClearOrphanObjectsRequest struct { + DC string `json:"dc" binding:"required"` + Prefix string `json:"prefix"` + ForceTaskLock bool `json:"forceTaskLock"` +} diff --git a/server/ente/app.go b/server/ente/app.go new file mode 100644 index 000000000..bd02ed08f --- /dev/null +++ b/server/ente/app.go @@ -0,0 +1,28 @@ +package ente + +// App represents the ente client app (photos, auth or locker) that a request pertains to +type App string + +const ( + Photos App = "photos" + Auth App = "auth" + Locker App = "locker" +) + +// IsValid checks if the app string is valid +func (a App) IsValid() bool { + switch a { + case Photos, Auth, Locker: + return true + } + return false +} + +// IsValidForCollection returns true if the given app type can create collections +func (a App) IsValidForCollection() bool { + switch a { + case Photos, Locker: + return true + } + return false +} diff --git a/server/ente/authenticator/authenticator.go b/server/ente/authenticator/authenticator.go new file mode 100644 index 000000000..4989333a5 --- /dev/null +++ b/server/ente/authenticator/authenticator.go @@ -0,0 +1,47 @@ +package authenticator + +import "github.com/google/uuid" + +type Key struct { + UserID int64 `json:"userID" binding:"required"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + Header string `json:"header" binding:"required"` + CreatedAt int64 `json:"createdAt" binding:"required"` +} + +// Entity represents a single TOTP Entity +type Entity struct { + ID uuid.UUID `json:"id" binding:"required"` + UserID int64 `json:"userID" binding:"required"` + EncryptedData *string `json:"encryptedData" binding:"required"` + Header *string `json:"header" binding:"required"` + IsDeleted bool `json:"isDeleted" binding:"required"` + CreatedAt int64 `json:"createdAt" binding:"required"` + UpdatedAt int64 `json:"updatedAt" binding:"required"` +} + +// CreateKeyRequest represents a request to create the TOTP encryption key for a user +type CreateKeyRequest struct { + EncryptedKey string `json:"encryptedKey" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// CreateEntityRequest represents a request to create a new authenticator entity +type CreateEntityRequest struct { + EncryptedData string `json:"encryptedData" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// UpdateEntityRequest represents a request to update an existing authenticator entity +type UpdateEntityRequest struct { + ID uuid.UUID `json:"id" binding:"required"` + EncryptedData string `json:"encryptedData" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// GetEntityDiffRequest represents a request to fetch authenticator entities changed since a given time +type GetEntityDiffRequest struct { + // SinceTime *int64. Pointer allows us to pass a 0 value, otherwise binding fails for the zero value.
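+ // (gin's `binding:"required"` treats a zero value as missing, so a plain + // int64 would cause sinceTime=0 to fail validation; the pointer sidesteps + // that check.)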
+ SinceTime *int64 `form:"sinceTime" binding:"required"` + Limit int16 `form:"limit" binding:"required"` +} diff --git a/server/ente/billing.go b/server/ente/billing.go new file mode 100644 index 000000000..4d8d3401d --- /dev/null +++ b/server/ente/billing.go @@ -0,0 +1,188 @@ +package ente + +import ( + "database/sql/driver" + "encoding/json" + + "github.com/ente-io/stacktrace" + "github.com/stripe/stripe-go/v72" + "github.com/stripe/stripe-go/v72/client" +) + +const ( + // FreePlanStorage is the amount of storage in the free plan + FreePlanStorage = 1 * 1024 * 1024 * 1024 + // FreePlanProductID is the product ID of the free plan + FreePlanProductID = "free" + // FreePlanTransactionID is the dummy transaction ID for the free plan + FreePlanTransactionID = "none" + // TrialPeriodDuration is the duration of the free trial + TrialPeriodDuration = 365 + // TrialPeriod is the unit for the duration of the free trial + TrialPeriod = "days" + + // PeriodYear is the unit for the duration of the yearly plan + PeriodYear = "year" + + // PeriodMonth is the unit for the duration of the monthly plan + PeriodMonth = "month" + + Period3Years = "3years" + + Period5Years = "5years" + + // FamilyPlanProductID is the product ID of the family (internal employees & their friends & family) plan + FamilyPlanProductID = "family" + + // StripeSignature is the header sent by the Stripe webhook to verify authenticity + StripeSignature = "Stripe-Signature" + + // OnHoldTemplate is the template for the email + // that is to be sent out when an account enters the hold stage + OnHoldTemplate = "on_hold.html" + + // AccountOnHoldEmailSubject is the subject of the account on hold email + AccountOnHoldEmailSubject = "ente account on hold" + + // Template for the email we send out when the user's subscription ends, + // either because the user cancelled their subscription, or because it + // expired. + SubscriptionEndedEmailTemplate = "subscription_ended.html" + + // Subject for `SubscriptionEndedEmailTemplate`.
+ SubscriptionEndedEmailSubject = "Your subscription to ente Photos has ended" +) + +// PaymentProvider represents the payment provider via which a purchase was made +type PaymentProvider string + +const ( + // PlayStore was the payment provider + PlayStore PaymentProvider = "playstore" + // AppStore was the payment provider + AppStore PaymentProvider = "appstore" + // Stripe was the payment provider + Stripe PaymentProvider = "stripe" + // Paypal was the payment provider + Paypal PaymentProvider = "paypal" + // BitPay was the payment provider + BitPay PaymentProvider = "bitpay" +) + +type StripeAccountCountry string + +type BillingPlansPerCountry map[string][]BillingPlan + +type BillingPlansPerAccount map[StripeAccountCountry]BillingPlansPerCountry + +type StripeClientPerAccount map[StripeAccountCountry]*client.API + +const ( + StripeIN StripeAccountCountry = "IN" + StripeUS StripeAccountCountry = "US" +) + +const DefaultStripeAccountCountry = StripeUS + +// AndroidNotification represents a notification received from PlayStore +type AndroidNotification struct { + Message AndroidNotificationMessage `json:"message"` + Subscription string `json:"subscription"` +} + +// AndroidNotificationMessage represents the message within the notification received from +// PlayStore +type AndroidNotificationMessage struct { + Attributes map[string]string `json:"attributes"` + Data string `json:"data"` + MessageID string `json:"messageId"` +} + +// BillingPlan represents a billing plan +type BillingPlan struct { + ID string `json:"id"` + AndroidID string `json:"androidID"` + IOSID string `json:"iosID"` + StripeID string `json:"stripeID"` + Storage int64 `json:"storage"` + Price string `json:"price"` + Period string `json:"period"` +} + +type FreePlan struct { + Storage int `json:"storage"` + Duration int `json:"duration"` + Period string `json:"period"` +} + +// Subscription represents a user's subscription to a billing plan +type Subscription struct { + ID int64 `json:"id"` + UserID int64 `json:"userID"` + // Identifier of the product on the respective stores that the user has subscribed to + ProductID string `json:"productID"` + Storage int64 `json:"storage"` + // LinkedPurchaseToken on PlayStore, OriginalTransactionID on AppStore, and SubscriptionID on Stripe + OriginalTransactionID string `json:"originalTransactionID"` + ExpiryTime int64 `json:"expiryTime"` + PaymentProvider PaymentProvider `json:"paymentProvider"` + Attributes SubscriptionAttributes `json:"attributes"` + Price string `json:"price"` + Period string `json:"period"` +} + +// SubscriptionAttributes represents a subscription's paymentProvider specific attributes +type SubscriptionAttributes struct { + // IsCancelled represents if the subscription's renewal has been cancelled + IsCancelled bool `json:"isCancelled,omitempty"` + // CustomerID represents the stripe customerID + CustomerID string `json:"customerID,omitempty"` + // LatestVerificationData is the latestTransactionReceipt received + LatestVerificationData string `json:"latestVerificationData,omitempty"` + // StripeAccountCountry is the identifier for the account in which the subscription is created. + StripeAccountCountry StripeAccountCountry `json:"stripeAccountCountry,omitempty"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (ca SubscriptionAttributes) Value() (driver.Value, error) { + return json.Marshal(ca) +} + +// Scan implements the sql.Scanner interface.
This method + simply decodes a JSON-encoded value into the struct fields. +func (ca *SubscriptionAttributes) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + + return json.Unmarshal(b, &ca) +} + +// SubscriptionVerificationRequest represents a request to verify a subscription done via a paymentProvider +type SubscriptionVerificationRequest struct { + PaymentProvider PaymentProvider `json:"paymentProvider"` + ProductID string `json:"productID"` + VerificationData string `json:"verificationData"` +} + +// StripeUpdateRequest represents a request to modify the stripe subscription +type StripeUpdateRequest struct { + ProductID string `json:"productID"` +} +type SubscriptionUpdateResponse struct { + Status string `json:"status"` + ClientSecret string `json:"clientSecret"` +} + +type StripeSubscriptionInfo struct { + PlanCountry string + AccountCountry StripeAccountCountry +} + +type StripeEventLog struct { + UserID int64 + StripeSubscription stripe.Subscription + Event stripe.Event +} diff --git a/server/ente/cache/user_data_cache.go b/server/ente/cache/user_data_cache.go new file mode 100644 index 000000000..45308f066 --- /dev/null +++ b/server/ente/cache/user_data_cache.go @@ -0,0 +1,56 @@ +package cache + +import ( + "fmt" + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/ente/storagebonus" + "sync" +) + +// UserCache holds in-memory caches of per-user entities (file counts and storage bonuses). +type UserCache struct { + mu sync.Mutex + fileCache map[string]int64 + bonusCache map[int64]*storagebonus.ActiveStorageBonus +} + +// NewUserCache creates a new instance of the UserCache struct. +func NewUserCache() *UserCache { + return &UserCache{ + fileCache: make(map[string]int64), + bonusCache: make(map[int64]*storagebonus.ActiveStorageBonus), + } +} + +// SetFileCount caches the given fileCount for the given userID and app. +func (c *UserCache) SetFileCount(userID, fileCount int64, app ente.App) { + c.mu.Lock() + defer c.mu.Unlock() + c.fileCache[cacheKey(userID, app)] = fileCount +} + +func (c *UserCache) SetBonus(userID int64, bonus *storagebonus.ActiveStorageBonus) { + c.mu.Lock() + defer c.mu.Unlock() + c.bonusCache[userID] = bonus +} + +func (c *UserCache) GetBonus(userID int64) (*storagebonus.ActiveStorageBonus, bool) { + c.mu.Lock() + defer c.mu.Unlock() + bonus, ok := c.bonusCache[userID] + return bonus, ok +} + +// GetFileCount retrieves the file count from the cache for the given userID and app. +// It returns the file count and a boolean indicating if the value was found. +func (c *UserCache) GetFileCount(userID int64, app ente.App) (int64, bool) { + c.mu.Lock() + defer c.mu.Unlock() + count, ok := c.fileCache[cacheKey(userID, app)] + return count, ok +} + +func cacheKey(userID int64, app ente.App) string { + return fmt.Sprintf("%d-%s", userID, app) +} diff --git a/server/ente/cast/entity.go b/server/ente/cast/entity.go new file mode 100644 index 000000000..deffa90b9 --- /dev/null +++ b/server/ente/cast/entity.go @@ -0,0 +1,19 @@ +package cast + +// CastRequest represents a request to publish an encrypted payload for a cast session
+type CastRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + CastToken string `json:"castToken" binding:"required"` + EncPayload string `json:"encPayload" binding:"required"` + DeviceCode string `json:"deviceCode" binding:"required"` +} + +type RegisterDeviceRequest struct { + DeviceCode *string `json:"deviceCode"` + PublicKey string `json:"publicKey" binding:"required"` +} + +type AuthContext struct { + CollectionID int64 + UserID int64 +} diff --git a/server/ente/collection.go b/server/ente/collection.go new file mode 100644 index 000000000..763d07b9b --- /dev/null +++ b/server/ente/collection.go @@ -0,0 +1,147 @@ +package ente + +import ( + "database/sql/driver" + "encoding/json" + + "github.com/ente-io/stacktrace" +) + +var ValidCollectionTypes = []string{"album", "folder", "favorites", "uncategorized"} + +// Collection represents a collection +type Collection struct { + ID int64 `json:"id"` + Owner CollectionUser `json:"owner"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + KeyDecryptionNonce string `json:"keyDecryptionNonce,omitempty" binding:"required"` + Name string `json:"name"` + EncryptedName string `json:"encryptedName"` + NameDecryptionNonce string `json:"nameDecryptionNonce"` + Type string `json:"type" binding:"required"` + Attributes CollectionAttributes `json:"attributes,omitempty" binding:"required"` + Sharees []CollectionUser `json:"sharees"` + PublicURLs []PublicURL `json:"publicURLs"` + UpdationTime int64 `json:"updationTime"` + IsDeleted bool `json:"isDeleted,omitempty"` + MagicMetadata *MagicMetadata `json:"magicMetadata,omitempty"` + App string `json:"app"` + PublicMagicMetadata *MagicMetadata `json:"pubMagicMetadata,omitempty"` + // SharedMagicMetadata keeps the metadata of the sharees to store settings like + // whether the collection should be shown on the timeline or not + SharedMagicMetadata *MagicMetadata `json:"sharedMagicMetadata,omitempty"` +} + +// AllowSharing indicates if this particular collection type can be shared +// or not +func (c *Collection) AllowSharing() bool { + if c == nil { + return false + } + if c.Type == "favorites" || c.Type == "uncategorized" { + return false + } + return true +} + +// AllowDelete indicates if this particular collection type can be deleted by the user +// or not +func (c *Collection) AllowDelete() bool { + if c == nil { + return false + } + if c.Type == "favorites" || c.Type == "uncategorized" { + return false + } + return true +} + +// CollectionUser represents a user associated with a collection (the owner or a sharee) +type CollectionUser struct { + ID int64 `json:"id"` + Email string `json:"email"` + // Deprecated + Name string `json:"name"` + Role CollectionParticipantRole `json:"role"` +} + +// CollectionAttributes represents a collection's attributes +type CollectionAttributes struct { + EncryptedPath string `json:"encryptedPath,omitempty"` + PathDecryptionNonce string `json:"pathDecryptionNonce,omitempty"` + Version int `json:"version"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (ca CollectionAttributes) Value() (driver.Value, error) { + return json.Marshal(ca) +} + +// Scan implements the sql.Scanner interface. This method + simply decodes a JSON-encoded value into the struct fields.
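+// +// Together, Value and Scan let the database store CollectionAttributes as a +// single JSON-encoded column while the rest of the code works with the typed +// struct; the other *Attributes types in this package follow the same pattern.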
+func (ca *CollectionAttributes) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + + return json.Unmarshal(b, &ca) +} + +// AlterShareRequest represents a share/unshare request +type AlterShareRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + Email string `json:"email" binding:"required"` + EncryptedKey string `json:"encryptedKey"` + Role *CollectionParticipantRole `json:"role"` +} + +// AddFilesRequest represents a request to add files to a collection +type AddFilesRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + Files []CollectionFileItem `json:"files" binding:"required"` +} + +// RemoveFilesRequest represents a request to remove files from a collection +type RemoveFilesRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + // FileIDs represents the files which don't belong to the user trying to remove files + FileIDs []int64 `json:"fileIDs"` +} + +// RemoveFilesV3Request represents the request payload for the v3 version of removing files from a collection. +// In V3, only files that don't belong to the collection owner can be removed from the collection. +// If the collection owner wants to remove files owned by them, the client should move those files to other collections +// owned by the collection owner. Also, see [Collection Delete Versions] for additional context. +type RemoveFilesV3Request struct { + CollectionID int64 `json:"collectionID" binding:"required"` + // FileIDs represents the files which don't belong to the user trying to remove files + FileIDs []int64 `json:"fileIDs" binding:"required"` +} + +type RenameRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + EncryptedName string `json:"encryptedName" binding:"required"` + NameDecryptionNonce string `json:"nameDecryptionNonce" binding:"required"` +} + +// UpdateCollectionMagicMetadata is the payload for updating the magic metadata for a single collection +type UpdateCollectionMagicMetadata struct { + ID int64 `json:"id" binding:"required"` + MagicMetadata MagicMetadata `json:"magicMetadata" binding:"required"` +} + +// CollectionFileItem represents a file in an AddFilesRequest and MoveFilesRequest +type CollectionFileItem struct { + ID int64 `json:"id" binding:"required"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + KeyDecryptionNonce string `json:"keyDecryptionNonce" binding:"required"` +} + +// MoveFilesRequest represents the movement of files between two collections +type MoveFilesRequest struct { + FromCollectionID int64 `json:"fromCollectionID" binding:"required"` + ToCollectionID int64 `json:"toCollectionID" binding:"required"` + Files []CollectionFileItem `json:"files" binding:"required"` +} diff --git a/server/ente/data_cleanup/entity.go b/server/ente/data_cleanup/entity.go new file mode 100644 index 000000000..0d98eec9a --- /dev/null +++ b/server/ente/data_cleanup/entity.go @@ -0,0 +1,28 @@ +package data_cleanup + +// Stage represents the action to be taken on the next scheduled run for a particular stage +type Stage string + +const ( + // Scheduled means user data is scheduled for deletion + Scheduled Stage = "scheduled" + // Collection means trash all collections for the user + Collection Stage = "collection" + // Trash means trigger empty trash for the user + Trash Stage = "trash" + // Storage means check for consumed storage + Storage Stage = "storage" + // Completed means data clean up is done + Completed
Stage = "completed" +) + +type DataCleanup struct { + UserID int64 + Stage Stage + // StageScheduleTime indicates when we should process the current stage + StageScheduleTime int64 + // StageAttemptCount refers to the number of attempts made to execute the current stage + StageAttemptCount int + CreatedAt int64 + UpdatedAt int64 +} diff --git a/server/ente/details/userdetails.go b/server/ente/details/userdetails.go new file mode 100644 index 000000000..d049a931b --- /dev/null +++ b/server/ente/details/userdetails.go @@ -0,0 +1,19 @@ +package details + +import ( + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/ente/storagebonus" +) + +type UserDetailsResponse struct { + Email string `json:"email,omitempty"` + Usage int64 `json:"usage"` + Subscription ente.Subscription `json:"subscription"` + FamilyData *ente.FamilyMemberResponse `json:"familyData,omitempty"` + FileCount *int64 `json:"fileCount,omitempty"` + // Deprecated field. Client doesn't consume this field. We can completely remove it after Aug 2023 + SharedCollectionsCount *int64 `json:"sharedCollectionsCount,omitempty"` + StorageBonus int64 `json:"storageBonus"` + ProfileData *ente.ProfileData `json:"profileData"` + BonusData *storagebonus.ActiveStorageBonus `json:"bonusData"` +} diff --git a/server/ente/email.go b/server/ente/email.go new file mode 100644 index 000000000..b2671cca3 --- /dev/null +++ b/server/ente/email.go @@ -0,0 +1,36 @@ +package ente + +const ( + // TransmailEndPoint is the mailing endpoint of TransMail (now called + // ZeptoMail), Zoho's transactional email service. + TransmailEndPoint = "https://api.transmail.com/v1.1/email" + // TransmailEndBounceAddress is the email address to send bounce messages to + TransmailEndBounceAddress = "bounces@bounce.ente.io" +) + +type SendEmailRequest struct { + To []string `json:"to" binding:"required"` + FromName string `json:"fromName" binding:"required"` + FromEmail string `json:"fromEmail" binding:"required"` + Subject string `json:"subject" binding:"required"` + Body string `json:"body" binding:"required"` +} + +type Mail struct { + BounceAddress string `json:"bounce_address"` + From EmailAddress `json:"from"` + To []ToEmailAddress `json:"to"` + Bcc []ToEmailAddress `json:"bcc"` + Subject string `json:"subject"` + Htmlbody string `json:"htmlbody"` + InlineImages []map[string]interface{} `json:"inline_images"` +} + +type ToEmailAddress struct { + EmailAddress EmailAddress `json:"email_address"` +} + +type EmailAddress struct { + Address string `json:"address"` + Name string `json:"name"` +} diff --git a/server/ente/embedding.go b/server/ente/embedding.go new file mode 100644 index 000000000..b59332ec6 --- /dev/null +++ b/server/ente/embedding.go @@ -0,0 +1,37 @@ +package ente + +type Embedding struct { + FileID int64 `json:"fileID"` + Model string `json:"model"` + EncryptedEmbedding string `json:"encryptedEmbedding"` + DecryptionHeader string `json:"decryptionHeader"` + UpdatedAt int64 `json:"updatedAt"` +} + +type InsertOrUpdateEmbeddingRequest struct { + FileID int64 `json:"fileID" binding:"required"` + Model string `json:"model" binding:"required"` + EncryptedEmbedding string `json:"encryptedEmbedding" binding:"required"` + DecryptionHeader string `json:"decryptionHeader" binding:"required"` +} + +type GetEmbeddingDiffRequest struct { + Model Model `form:"model"` + // SinceTime *int64. Pointer allows us to pass a 0 value, otherwise binding fails for the zero value.
+ SinceTime *int64 `form:"sinceTime" binding:"required"` + Limit int16 `form:"limit" binding:"required"` +} + +type Model string + +const ( + OnnxClip Model = "onnx-clip" + GgmlClip Model = "ggml-clip" +) + +type EmbeddingObject struct { + Version int `json:"v"` + EncryptedEmbedding string `json:"embedding"` + DecryptionHeader string `json:"header"` + Client string `json:"client"` +} diff --git a/server/ente/errors.go b/server/ente/errors.go new file mode 100644 index 000000000..49aed7151 --- /dev/null +++ b/server/ente/errors.go @@ -0,0 +1,253 @@ +package ente + +import ( + "errors" + "fmt" + "net/http" +) + +// ErrPermissionDenied is returned when a user has insufficient permissions to +// perform an action +var ErrPermissionDenied = errors.New("insufficient permissions to perform this action") + +// ErrIncorrectOTT is returned when a user tries to validate an email with an +// incorrect OTT +var ErrIncorrectOTT = errors.New("incorrect OTT") + +// ErrExpiredOTT is returned when a user tries to validate an email but there's no active OTT +var ErrExpiredOTT = errors.New("no active OTT") + +// ErrIncorrectTOTP is returned when a user tries to validate a two-factor code with an +// incorrect TOTP +var ErrIncorrectTOTP = errors.New("incorrect TOTP") + +// ErrNotFound is returned when the requested resource was not found +var ErrNotFound = errors.New("not found") + +var ErrFileLimitReached = errors.New("file limit reached") + +// ErrBadRequest is returned when a bad request is encountered +var ErrBadRequest = errors.New("bad request") + +// ErrTooManyBadRequest is returned when a user sends many bad requests, especially for authentication +var ErrTooManyBadRequest = errors.New("too many bad request") + +// ErrUnexpectedState is returned when certain assumptions/assertions fail +var ErrUnexpectedState = errors.New("unexpected state") + +// ErrCannotDowngrade is thrown when a user tries to downgrade to a plan whose +// limits are lower than current consumption +var ErrCannotDowngrade = errors.New("usage is greater than selected plan, cannot downgrade") + +// ErrCannotSwitchPaymentProvider is thrown when a user attempts to renew a subscription from a different payment provider +var ErrCannotSwitchPaymentProvider = errors.New("cannot switch payment provider") + +// ErrNoActiveSubscription is returned when the user doesn't have any active plans +var ErrNoActiveSubscription = errors.New("no Active Subscription") + +// ErrStorageLimitExceeded is thrown when the user exceeds the plan's data storage limit +var ErrStorageLimitExceeded = errors.New("storage Limit exceeded") + +// ErrFileTooLarge is thrown when an uploaded file is too large for the storage plan +var ErrFileTooLarge = errors.New("file too large") + +// ErrSharingDisabledForFreeAccounts is thrown when a user on the free subscription tries to share files +var ErrSharingDisabledForFreeAccounts = errors.New("sharing Feature is disabled for free accounts") + +// ErrDuplicateFileObjectFound is thrown when another file with the same objectKey is detected +var ErrDuplicateFileObjectFound = errors.New("file object already exists") + +var ErrFavoriteCollectionAlreadyExist = errors.New("favorites collection already exists") + +var ErrUncategorizeCollectionAlreadyExists = errors.New("uncategorized collection already exists") + +// ErrDuplicateThumbnailObjectFound is thrown when another thumbnail with the same objectKey is detected +var ErrDuplicateThumbnailObjectFound = errors.New("thumbnail object already exists") + +// ErrVersionMismatch is thrown when, for versioned updates,
the client sends an incorrect version to the server +var ErrVersionMismatch = errors.New("client version is out of sync") + +// ErrCanNotInviteUserWithPaidPlan is thrown when a family admin tries to invite another user with an active paid plan +var ErrCanNotInviteUserWithPaidPlan = errors.New("can not invite user with active paid plan") + +// ErrBatchSizeTooLarge is thrown when the API request batch size is greater than the API limit +var ErrBatchSizeTooLarge = errors.New("batch size greater than API limit") + +// ErrAuthenticationRequired is thrown when the authentication vector is missing +var ErrAuthenticationRequired = errors.New("authentication required") + +// ErrInvalidPassword is thrown when an incorrect password is provided by the user +var ErrInvalidPassword = errors.New("invalid password") + +// ErrCanNotInviteUserAlreadyInFamily is thrown when a family admin tries to invite a user who is already part of a family +var ErrCanNotInviteUserAlreadyInFamily = errors.New("can not invite user who is already part of a family") + +// ErrFamilySizeLimitReached is thrown when a family admin tries to invite more than the max allowed members for the family plan +var ErrFamilySizeLimitReached = errors.New("can't invite new member, family already at max allowed size") + +// ErrUserDeleted is thrown when get user is called for a deleted account +var ErrUserDeleted = errors.New("user account has been deleted") + +// ErrLockUnavailable is thrown when a lock could not be acquired +var ErrLockUnavailable = errors.New("could not acquire lock") + +// ErrActiveLinkAlreadyExists is thrown when the collection already has an active public link +var ErrActiveLinkAlreadyExists = errors.New("Collection already has active public link") + +// ErrNotImplemented indicates that the action that we tried to perform is not +// available at this museum instance. e.g. this could be something that is not +// enabled on this particular instance of museum. +// +// Semantically, it could've been better called NotAvailable, but +// NotAvailable is meant to be used for temporary errors, whilst we wish to +// indicate that this instance will not serve this request at all.
+var ErrNotImplemented = errors.New("not implemented") + +var ErrInvalidApp = errors.New("invalid app") + +var ErrInvalidName = errors.New("invalid name") + +var ErrSubscriptionAlreadyClaimed = ApiError{ + Code: SubscriptionAlreadyClaimed, + HttpStatusCode: http.StatusConflict, + Message: "Subscription is already associated with a different account", +} + +var ErrCollectionNotEmpty = ApiError{ + Code: CollectionNotEmpty, + HttpStatusCode: http.StatusConflict, + Message: "The collection is not empty", +} + +var ErrFileNotFoundInAlbum = ApiError{ + Code: FileNotFoundInAlbum, + HttpStatusCode: http.StatusNotFound, + Message: "File is either deleted or moved to different collection", +} + +var ErrPublicCollectDisabled = ApiError{ + Code: PublicCollectDisabled, + Message: "User has not enabled public collect for this url", + HttpStatusCode: http.StatusMethodNotAllowed, +} + +var ErrNotFoundError = ApiError{ + Code: NotFoundError, + Message: "", + HttpStatusCode: http.StatusNotFound, +} + +var ErrMaxPasskeysReached = ApiError{ + Code: MaxPasskeysReached, + Message: "Max passkeys limit reached", + HttpStatusCode: http.StatusConflict, +} + +var ErrCastPermissionDenied = ApiError{ + Code: "CAST_PERMISSION_DENIED", + Message: "Permission denied", + HttpStatusCode: http.StatusForbidden, +} + +type ErrorCode string + +const ( + // Standard, generic error codes + BadRequest ErrorCode = "BAD_REQUEST" + CONFLICT ErrorCode = "CONFLICT" + + InternalError ErrorCode = "INTERNAL_ERROR" + + NotFoundError ErrorCode = "NOT_FOUND" + + // Business specific error codes + FamiliySizeLimitExceeded ErrorCode = "FAMILY_SIZE_LIMIT_EXCEEDED" + + // Subscription already associated with a different account + SubscriptionAlreadyClaimed ErrorCode = "SUBSCRIPTION_ALREADY_CLAIMED" + + FileNotFoundInAlbum ErrorCode = "FILE_NOT_FOUND_IN_ALBUM" + + // PublicCollectDisabled error code indicates that the user has not enabled public collect + PublicCollectDisabled ErrorCode = "PUBLIC_COLLECT_DISABLED" + + // CollectionNotEmpty is thrown when a user attempts to delete a collection while keeping its files, + // but not all files from that collection have been moved out yet. + CollectionNotEmpty ErrorCode = "COLLECTION_NOT_EMPTY" + + // MaxPasskeysReached is thrown when a user attempts to create more than the max allowed passkeys + MaxPasskeysReached ErrorCode = "MAX_PASSKEYS_REACHED" +) + +type ApiError struct { + // Code will be returned as part of the response body. Clients are expected to rely on this code while handling any error + Code ErrorCode `json:"code"` + // Optional message, which can give additional details about this error. Say for a generic 404 error, it can state which entity is not found, + // like file/album/user. Clients should never consume this message for showing errors on screen or for any special handling.
+ Message string `json:"message"` + HttpStatusCode int `json:"-"` +} + +func (e *ApiError) NewErr(message string) *ApiError { + return &ApiError{ + Code: e.Code, + Message: message, + HttpStatusCode: e.HttpStatusCode, + } +} +func (e *ApiError) Error() string { + return fmt.Sprintf("%s : %s", string(e.Code), e.Message) +} + +type ApiErrorParams struct { + HttpStatusCode *int + Code ErrorCode + Message string +} + +var badRequestApiError = ApiError{ + Code: BadRequest, + HttpStatusCode: http.StatusBadRequest, + Message: "BAD_REQUEST", +} + +func NewBadRequestError(params *ApiErrorParams) *ApiError { + if params == nil { + return &badRequestApiError + } + apiError := badRequestApiError + if params.HttpStatusCode != nil { + apiError.HttpStatusCode = *params.HttpStatusCode + } + if params.Message != "" { + apiError.Message = params.Message + } + if params.Code != "" { + apiError.Code = params.Code + } + return &apiError +} +func NewBadRequestWithMessage(message string) *ApiError { + return &ApiError{ + Code: BadRequest, + HttpStatusCode: http.StatusBadRequest, + Message: message, + } +} + +func NewConflictError(message string) *ApiError { + return &ApiError{ + Code: CONFLICT, + HttpStatusCode: http.StatusConflict, + Message: message, + } +} + +func NewInternalError(message string) *ApiError { + apiError := ApiError{ + Code: InternalError, + HttpStatusCode: http.StatusInternalServerError, + Message: message, + } + return &apiError +}
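To illustrate how these typed errors are meant to be surfaced, here is a minimal sketch of a handler-layer helper. The function and package names are hypothetical; gin is already a dependency of museum, and errors.As works because Error() is defined on *ApiError:

```go
package api

import (
	"errors"
	"net/http"

	"github.com/ente-io/museum/ente"
	"github.com/gin-gonic/gin"
)

// abortWithApiError responds with the ApiError's status code and JSON body
// (Code and Message; HttpStatusCode itself is excluded via its json:"-" tag)
// when err wraps an *ente.ApiError, and falls back to a generic 500 otherwise.
func abortWithApiError(c *gin.Context, err error) {
	var apiErr *ente.ApiError
	if errors.As(err, &apiErr) {
		c.AbortWithStatusJSON(apiErr.HttpStatusCode, apiErr)
		return
	}
	c.AbortWithStatusJSON(http.StatusInternalServerError, ente.NewInternalError(err.Error()))
}
```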
diff --git a/server/ente/family.go b/server/ente/family.go new file mode 100644 index 000000000..e01ae0242 --- /dev/null +++ b/server/ente/family.go @@ -0,0 +1,71 @@ +package ente + +import ( + "github.com/google/uuid" +) + +type MemberStatus string + +const ( + SELF MemberStatus = "SELF" + CLOSED MemberStatus = "CLOSED" + INVITED MemberStatus = "INVITED" + ACCEPTED MemberStatus = "ACCEPTED" + DECLINED MemberStatus = "DECLINED" + REVOKED MemberStatus = "REVOKED" + REMOVED MemberStatus = "REMOVED" + LEFT MemberStatus = "LEFT" +) + +type InviteMemberRequest struct { + Email string `json:"email" binding:"required"` +} + +type InviteInfoResponse struct { + ID uuid.UUID `json:"id" binding:"required"` + AdminEmail string `json:"adminEmail" binding:"required"` +} + +type AcceptInviteResponse struct { + AdminEmail string `json:"adminEmail" binding:"required"` + Storage int64 `json:"storage" binding:"required"` + ExpiryTime int64 `json:"expiryTime" binding:"required"` +} + +type AcceptInviteRequest struct { + Token string `json:"token" binding:"required"` +} + +type FamilyMember struct { + ID uuid.UUID `json:"id" binding:"required"` + Email string `json:"email" binding:"required"` + Status MemberStatus `json:"status" binding:"required"` + // This information should not be sent back in the response if the membership status is `INVITED` + Usage int64 `json:"usage"` + IsAdmin bool `json:"isAdmin"` + MemberUserID int64 `json:"-"` // for internal use only, ignore from json response + AdminUserID int64 `json:"-"` // for internal use only, ignore from json response +} + +type FamilyMemberResponse struct { + Members []FamilyMember `json:"members" binding:"required"` + // Family admin subscription storage capacity. This excludes add-on and any other bonus storage + Storage int64 `json:"storage" binding:"required"` + // Family admin subscription expiry time + ExpiryTime int64 `json:"expiryTime" binding:"required"` + + AdminBonus int64 `json:"adminBonus" binding:"required"` +} + +type UserUsageWithSubData struct { + UserID int64 + // StorageConsumed by the current member. + // This information should not be sent back in the response if the membership status is `INVITED` + StorageConsumed int64 + // ExpiryTime of the member's current subscription plan + ExpiryTime int64 + // Storage indicates the storage capacity based on the member's current subscription plan + Storage int64 + // Email of the member. It will be populated on a need basis + Email *string +}
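As a consumption sketch (hypothetical helper, not part of this patch), tallying a family's total consumed storage from a FamilyMemberResponse while skipping pending invites, whose Usage is deliberately left unpopulated:

```go
package familyutil

import "github.com/ente-io/museum/ente"

// TotalFamilyUsage sums the storage consumed by all family members.
// Members whose status is INVITED are skipped, since their Usage is
// not populated in responses.
func TotalFamilyUsage(resp ente.FamilyMemberResponse) int64 {
	var total int64
	for _, m := range resp.Members {
		if m.Status == ente.INVITED {
			continue
		}
		total += m.Usage
	}
	return total
}
```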
diff --git a/server/ente/file.go b/server/ente/file.go new file mode 100644 index 000000000..4a69473e3 --- /dev/null +++ b/server/ente/file.go @@ -0,0 +1,213 @@ +package ente + +import ( + "database/sql/driver" + "encoding/json" + + "github.com/ente-io/stacktrace" +) + +// File represents an encrypted file in the system +type File struct { + ID int64 `json:"id"` + OwnerID int64 `json:"ownerID"` + CollectionID int64 `json:"collectionID"` + CollectionOwnerID *int64 `json:"collectionOwnerID"` + EncryptedKey string `json:"encryptedKey"` + KeyDecryptionNonce string `json:"keyDecryptionNonce"` + File FileAttributes `json:"file" binding:"required"` + Thumbnail FileAttributes `json:"thumbnail" binding:"required"` + Metadata FileAttributes `json:"metadata" binding:"required"` + // IsDeleted is true when the file has been removed from the collection indicated by CollectionID + IsDeleted bool `json:"isDeleted"` + UpdationTime int64 `json:"updationTime"` + MagicMetadata *MagicMetadata `json:"magicMetadata,omitempty"` + PubicMagicMetadata *MagicMetadata `json:"pubMagicMetadata,omitempty"` + Info *FileInfo `json:"info,omitempty"` +} + +// FileInfo has information about the storage used by the file & its metadata (future) +type FileInfo struct { + FileSize int64 `json:"fileSize,omitempty"` + ThumbnailSize int64 `json:"thumbSize,omitempty"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (fi FileInfo) Value() (driver.Value, error) { + return json.Marshal(fi) +} + +// Scan implements the sql.Scanner interface. This method +// simply decodes a JSON-encoded value into the struct fields. +func (fi *FileInfo) Scan(value interface{}) error { + if value == nil { + return nil + } + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + return json.Unmarshal(b, &fi) +} + +// UpdateFileResponse represents a response to the UpdateFileRequest +type UpdateFileResponse struct { + ID int64 `json:"id" binding:"required"` + UpdationTime int64 `json:"updationTime" binding:"required"` +} + +// FileIDsRequest represents a request where we just pass fileIDs as payload +type FileIDsRequest struct { + FileIDs []int64 `json:"fileIDs" binding:"required"` +} + +type FileInfoResponse struct { + ID int64 `json:"id"` + FileInfo FileInfo `json:"fileInfo"` +} +type FilesInfoResponse struct { + FilesInfo []*FileInfoResponse `json:"filesInfo"` +} + +type TrashRequest struct { + OwnerID int64 // ownerID will be set internally via auth header + TrashItems []TrashItemRequest `json:"items" binding:"required"` +} + +// TrashItemRequest represents the request payload for deleting one file +type TrashItemRequest struct { + FileID int64 `json:"fileID" binding:"required"` + // collectionID belonging to the same owner + CollectionID int64 `json:"collectionID" binding:"required"` +} + +// GetSizeRequest represents a request to get the size of files +type GetSizeRequest struct { + FileIDs []int64 `json:"fileIDs" binding:"required"` +} + +// FileAttributes represents a file item +type FileAttributes struct { + ObjectKey string `json:"objectKey,omitempty"` + EncryptedData string `json:"encryptedData,omitempty"` + DecryptionHeader string `json:"decryptionHeader" binding:"required"` + Size int64 `json:"size"` +} + +type MagicMetadata struct { + Version int `json:"version,omitempty" binding:"required"` + // Count indicates the number of keys in the JSON representation of the magic attributes. + // On edit/update, this count should be >= the count in the previous version. + Count int `json:"count,omitempty" binding:"required"` + // Data represents the encrypted blob of the JSON-encoded attributes, encrypted using the file key. + Data string `json:"data,omitempty" binding:"required"` + // Header is used for decrypting the encrypted attributes on the client. + Header string `json:"header,omitempty" binding:"required"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (mmd MagicMetadata) Value() (driver.Value, error) { + return json.Marshal(mmd) +}
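Because FileInfo (and likewise MagicMetadata) implements both driver.Valuer and sql.Scanner, it round-trips through a JSON database column transparently. A minimal standalone sketch with hypothetical values:

```go
package main

import (
	"fmt"

	"github.com/ente-io/museum/ente"
)

func main() {
	in := ente.FileInfo{FileSize: 1024, ThumbnailSize: 256}
	v, err := in.Value() // the JSON bytes handed to database/sql on write
	if err != nil {
		panic(err)
	}
	var out ente.FileInfo
	if err := out.Scan(v); err != nil { // what database/sql hands back on read
		panic(err)
	}
	fmt.Println(out.FileSize, out.ThumbnailSize) // 1024 256
}
```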
// Scan implements the sql.Scanner interface. This method +// simply decodes a JSON-encoded value into the struct fields. +func (mmd *MagicMetadata) Scan(value interface{}) error { + if value == nil { + return nil + } + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + return json.Unmarshal(b, &mmd) +} + +// UpdateMagicMetadata is the payload for updating magic metadata for a single file +type UpdateMagicMetadata struct { + ID int64 `json:"id" binding:"required"` + MagicMetadata MagicMetadata `json:"magicMetadata" binding:"required"` +} + +// UpdateMultipleMagicMetadataRequest is the request payload for updating magic metadata for a list of files +type UpdateMultipleMagicMetadataRequest struct { + MetadataList []UpdateMagicMetadata `json:"metadataList" binding:"required"` +} + +// UploadURL represents the upload URL for a specific object +type UploadURL struct { + ObjectKey string `json:"objectKey"` + URL string `json:"url"` +} + +// MultipartUploadURLs represents the part upload URLs for a specific object +type MultipartUploadURLs struct { + ObjectKey string `json:"objectKey"` + PartURLs []string `json:"partURLs"` + CompleteURL string `json:"completeURL"` +} + +type ObjectType string + +const ( + FILE ObjectType = "file" + THUMBNAIL ObjectType = "thumbnail" +) + +// S3ObjectKey represents the S3 object key and the corresponding fileID for it +type S3ObjectKey struct { + FileID int64 + ObjectKey string + FileSize int64 + Type ObjectType +} + +// ObjectCopies represents a row from the object_copies table. +// +// It contains information about which replicas a given object key should be, +// and has been, replicated to. +type ObjectCopies struct { + ObjectKey string + WantB2 bool + B2 *int64 + WantWasabi bool + Wasabi *int64 + WantSCW bool + SCW *int64 +} + +// ObjectState represents details about an object that are needed for +// pre-flight checks during replication. +// +// This information is obtained by joining various tables. +type ObjectState struct { + // true if the file corresponding to this object has been deleted (or cannot + // be found) + IsFileDeleted bool + // true if the owner of the file corresponding to this object has deleted + // their account (or cannot be found). + IsUserDeleted bool + // Size of the object, in bytes. + Size int64 +}
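To illustrate how these two types fit together, a hypothetical pre-flight predicate (the actual replication worker lives elsewhere in this patch; the reading that the *int64 fields are non-nil once a copy exists is an assumption based on the struct comment):

```go
package replication

import "github.com/ente-io/museum/ente"

// shouldReplicateToWasabi reports whether an object still needs a Wasabi
// copy: it must be wanted there, not yet copied (Wasabi is assumed to be
// non-nil once a copy exists), and its file and owner must still exist.
func shouldReplicateToWasabi(c ente.ObjectCopies, s ente.ObjectState) bool {
	if s.IsFileDeleted || s.IsUserDeleted {
		// deleted objects are filtered out during pre-flight checks
		return false
	}
	return c.WantWasabi && c.Wasabi == nil
}
```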
+ +// TempObject represents an entry in the tempObjects table +type TempObject struct { + ObjectKey string + IsMultipart bool + UploadID string + DataCenter string +} + +// DuplicateFiles represents duplicate files +type DuplicateFiles struct { + FileIDs []int64 `json:"fileIDs"` + Size int64 `json:"size"` +} + +type UpdateThumbnailRequest struct { + FileID int64 `json:"fileID" binding:"required"` + Thumbnail FileAttributes `json:"thumbnail" binding:"required"` +} diff --git a/server/ente/jwt/jwt.go b/server/ente/jwt/jwt.go new file mode 100644 index 000000000..34511f5e3 --- /dev/null +++ b/server/ente/jwt/jwt.go @@ -0,0 +1,53 @@ +package jwt + +import ( + "errors" + + "github.com/ente-io/museum/pkg/utils/time" +) + +type ClaimScope string + +const ( + PAYMENT ClaimScope = "PAYMENT" + FAMILIES ClaimScope = "FAMILIES" + ACCOUNTS ClaimScope = "ACCOUNTS" + DELETE_ACCOUNT ClaimScope = "DELETE_ACCOUNT" +) + +func (c ClaimScope) Ptr() *ClaimScope { + return &c +} + +type WebCommonJWTClaim struct { + UserID int64 `json:"userID"` + ExpiryTime int64 `json:"expiryTime"` + ClaimScope *ClaimScope `json:"claimScope"` +} + +func (w *WebCommonJWTClaim) GetScope() ClaimScope { + if w.ClaimScope == nil { + return PAYMENT + } + return *w.ClaimScope +} + +func (w WebCommonJWTClaim) Valid() error { + if w.ExpiryTime < time.Microseconds() { + return errors.New("token expired") + } + return nil +} + +// PublicAlbumPasswordClaim refers to the token granted after public album password verification +type PublicAlbumPasswordClaim struct { + PassHash string `json:"passKey"` + ExpiryTime int64 `json:"expiryTime"` +} + +func (c PublicAlbumPasswordClaim) Valid() error { + if c.ExpiryTime < time.Microseconds() { + return errors.New("token expired") + } + return nil +} diff --git a/server/ente/kex.go b/server/ente/kex.go new file mode 100644 index 000000000..9f63ad9af --- /dev/null +++ b/server/ente/kex.go @@ -0,0 +1,6 @@ +package ente + +type AddWrappedKeyRequest struct { + WrappedKey string `json:"wrappedKey" binding:"required"` + CustomIdentifier string `json:"customIdentifier"` +}
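The claim types in jwt.go above implement Valid(), so they satisfy the Claims interface of github.com/golang-jwt/jwt (the v3 module listed in go.mod) and can be fed directly to ParseWithClaims. A hypothetical sketch; the secret handling is illustrative only:

```go
package auth

import (
	jwtgo "github.com/golang-jwt/jwt"

	enteJWT "github.com/ente-io/museum/ente/jwt"
)

// parseWebToken verifies the signature and the expiry (the latter via the
// claim's own Valid method) and returns the decoded claim. The secret is a
// hypothetical parameter; real code would also pin the expected signing method.
func parseWebToken(token string, secret []byte) (*enteJWT.WebCommonJWTClaim, error) {
	claim := &enteJWT.WebCommonJWTClaim{}
	if _, err := jwtgo.ParseWithClaims(token, claim, func(t *jwtgo.Token) (interface{}, error) {
		return secret, nil
	}); err != nil {
		return nil, err
	}
	return claim, nil
}
```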
diff --git a/server/ente/locationtag.go b/server/ente/locationtag.go new file mode 100644 index 000000000..61c191006 --- /dev/null +++ b/server/ente/locationtag.go @@ -0,0 +1,59 @@ +package ente + +import ( + "database/sql/driver" + "encoding/json" + "github.com/ente-io/stacktrace" + "github.com/google/uuid" +) + +// LocationTag represents a location tag in the system. The location information +// is stored encrypted as Attributes +type LocationTag struct { + ID uuid.UUID `json:"id"` + OwnerID int64 `json:"ownerId,omitempty"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + KeyDecryptionNonce string `json:"keyDecryptionNonce" binding:"required"` + Attributes LocationTagAttribute `json:"attributes" binding:"required"` + IsDeleted bool `json:"isDeleted"` + Provider string `json:"provider,omitempty"` + CreatedAt int64 `json:"createdAt,omitempty"` // utc epoch microseconds + UpdatedAt int64 `json:"updatedAt,omitempty"` // utc epoch microseconds +} + +// LocationTagAttribute holds encrypted data about the user's location tag. +type LocationTagAttribute struct { + Version int `json:"version,omitempty" binding:"required"` + EncryptedData string `json:"encryptedData,omitempty" binding:"required"` + DecryptionNonce string `json:"decryptionNonce,omitempty" binding:"required"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (la LocationTagAttribute) Value() (driver.Value, error) { + return json.Marshal(la) +} + +// Scan implements the sql.Scanner interface. This method +// simply decodes a JSON-encoded value into the struct fields. +func (la *LocationTagAttribute) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + return json.Unmarshal(b, &la) +} + +// DeleteLocationTagRequest is the request structure for deleting a location tag +type DeleteLocationTagRequest struct { + ID uuid.UUID `json:"id" binding:"required"` + OwnerID int64 // should be populated from req headers +} + +// GetLocationTagDiffRequest is the request struct for fetching locationTag changes +type GetLocationTagDiffRequest struct { + // SinceTime *int64. Pointer allows us to pass 0 value otherwise binding fails for zero Value. + SinceTime *int64 `form:"sinceTime" binding:"required"` + Limit int16 `form:"limit" binding:"required"` + OwnerID int64 // should be populated from req headers +} diff --git a/server/ente/offer.go b/server/ente/offer.go new file mode 100644 index 000000000..4cadde364 --- /dev/null +++ b/server/ente/offer.go @@ -0,0 +1,13 @@ +package ente + +// BlackFridayOffer represents the latest Black Friday Offer +type BlackFridayOffer struct { + ID string `json:"id"` + Storage int64 `json:"storage"` + Price string `json:"price"` + OldPrice string `json:"oldPrice"` + Period string `json:"period"` + PaymentLink string `json:"paymentLink"` +} + +type BlackFridayOfferPerCountry map[string][]BlackFridayOffer diff --git a/server/ente/passkey.go b/server/ente/passkey.go new file mode 100644 index 000000000..0ed41965c --- /dev/null +++ b/server/ente/passkey.go @@ -0,0 +1,14 @@ +package ente + +import "github.com/google/uuid" + +// Passkey is our way of keeping track of user credentials and storing useful info for users. +type Passkey struct { + ID uuid.UUID `json:"id"` + UserID int64 `json:"userID"` + FriendlyName string `json:"friendlyName"` + + CreatedAt int64 `json:"createdAt"` +} + +var MaxPasskeys = 10
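A sketch of how MaxPasskeys and the ErrMaxPasskeysReached ApiError defined earlier fit together (hypothetical guard, not the actual handler from this patch):

```go
package passkeys

import "github.com/ente-io/museum/ente"

// canAddPasskey returns ErrMaxPasskeysReached (an HTTP 409 ApiError) once a
// user already has MaxPasskeys registered credentials, and nil otherwise.
func canAddPasskey(existing []ente.Passkey) error {
	if len(existing) >= ente.MaxPasskeys {
		return &ente.ErrMaxPasskeysReached
	}
	return nil
}
```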
diff --git a/server/ente/passkeyCredential.go b/server/ente/passkeyCredential.go new file mode 100644 index 000000000..3da7cbcc2 --- /dev/null +++ b/server/ente/passkeyCredential.go @@ -0,0 +1,94 @@ +package ente + +import ( + "encoding/base64" + "encoding/json" + "strings" + + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + "github.com/google/uuid" +) + +// PasskeyCredential holds the actual WebAuthn credentials we will send back to the user during auth, for the browser to check if they have an eligible authenticator. +type PasskeyCredential struct { + PasskeyID uuid.UUID `json:"passkeyID"` + + CredentialID string `json:"credentialID"` // string + + PublicKey string `json:"publicKey"` // b64 []byte + AttestationType string `json:"attestationType"` + AuthenticatorTransports string `json:"authenticatorTransports"` // comma-separated slice of strings + CredentialFlags string `json:"credentialFlags"` // json encoded struct + Authenticator string `json:"authenticator"` // json encoded struct with b64 []byte for AAGUID + + CreatedAt int64 `json:"createdAt"` +} + +// WebAuthnCredential de-serializes the stored row back into a webauthn.Credential +func (c *PasskeyCredential) WebAuthnCredential() (cred *webauthn.Credential, err error) { + + decodedID, err := base64.StdEncoding.DecodeString(c.CredentialID) + if err != nil { + return + } + + cred = &webauthn.Credential{ + ID: decodedID, + AttestationType: c.AttestationType, + } + + transports := []protocol.AuthenticatorTransport{} + transportStrings := strings.Split(c.AuthenticatorTransports, ",") + for _, t := range transportStrings { + transports = append(transports, protocol.AuthenticatorTransport(t)) + } + + cred.Transport = transports + + // decode b64 back to []byte + publicKeyByte, err := base64.StdEncoding.DecodeString(c.PublicKey) + if err != nil { + return + } + + cred.PublicKey = publicKeyByte + + err = json.Unmarshal( + []byte(c.CredentialFlags), + &cred.Flags, + ) + if err != nil { + return + } + + authenticatorMap := map[string]interface{}{} + + err = json.Unmarshal( + []byte(c.Authenticator), + &authenticatorMap, + ) + if err != nil { + return + } + + // decode the AAGUID base64 back to []byte + aaguidByte, err := base64.StdEncoding.DecodeString( + authenticatorMap["AAGUID"].(string), + ) + if err != nil { + return + } + + authenticator := webauthn.Authenticator{ + AAGUID: aaguidByte, + SignCount: uint32(authenticatorMap["SignCount"].(float64)), + CloneWarning: authenticatorMap["CloneWarning"].(bool), + Attachment: protocol.AuthenticatorAttachment(authenticatorMap["Attachment"].(string)), + } + + cred.Authenticator = authenticator + + return + +}
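A round-trip sketch for the decoder above; all field values are hypothetical but well-formed (16 zero bytes stand in for an AAGUID, and the flags JSON is assumed to match go-webauthn's CredentialFlags tags):

```go
package main

import (
	"encoding/base64"
	"fmt"

	"github.com/ente-io/museum/ente"
)

func main() {
	row := ente.PasskeyCredential{
		CredentialID:            base64.StdEncoding.EncodeToString([]byte{1, 2, 3}),
		PublicKey:               base64.StdEncoding.EncodeToString([]byte{4, 5, 6}),
		AttestationType:         "none",
		AuthenticatorTransports: "internal,hybrid",
		CredentialFlags:         `{"userPresent":true,"userVerified":true}`,
		Authenticator: `{"AAGUID":"` + base64.StdEncoding.EncodeToString(make([]byte, 16)) +
			`","SignCount":0,"CloneWarning":false,"Attachment":"platform"}`,
	}
	cred, err := row.WebAuthnCredential()
	fmt.Println(len(cred.ID), cred.Transport, err) // 3 [internal hybrid] <nil>
}
```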
diff --git a/server/ente/public_collection.go b/server/ente/public_collection.go new file mode 100644 index 000000000..6e71e35b1 --- /dev/null +++ b/server/ente/public_collection.go @@ -0,0 +1,148 @@ +package ente + +import ( + "database/sql/driver" + "encoding/json" + + "github.com/ente-io/stacktrace" +) + +// CreatePublicAccessTokenRequest is the payload for creating an accessToken for public albums +type CreatePublicAccessTokenRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + EnableCollect bool `json:"enableCollect"` + ValidTill int64 `json:"validTill"` + DeviceLimit int `json:"deviceLimit"` +} + +type UpdatePublicAccessTokenRequest struct { + CollectionID int64 `json:"collectionID" binding:"required"` + ValidTill *int64 `json:"validTill"` + DeviceLimit *int `json:"deviceLimit"` + PassHash *string `json:"passHash"` + Nonce *string `json:"nonce"` + MemLimit *int64 `json:"memLimit"` + OpsLimit *int64 `json:"opsLimit"` + EnableDownload *bool `json:"enableDownload"` + EnableCollect *bool `json:"enableCollect"` + DisablePassword *bool `json:"disablePassword"` +} + +type VerifyPasswordRequest struct { + PassHash string `json:"passHash" binding:"required"` +} + +type VerifyPasswordResponse struct { + JWTToken string `json:"jwtToken"` +} + +// PublicCollectionToken represents a row entity for the public_collection_token table +type PublicCollectionToken struct { + ID int64 + CollectionID int64 + Token string + DeviceLimit int + ValidTill int64 + IsDisabled bool + PassHash *string + Nonce *string + MemLimit *int64 + OpsLimit *int64 + EnableDownload bool + EnableCollect bool +} + +// PublicURL represents information about a non-disabled public URL for a collection +type PublicURL struct { + URL string `json:"url"` + DeviceLimit int `json:"deviceLimit"` + ValidTill int64 `json:"validTill"` + EnableDownload bool `json:"enableDownload"` + // EnableCollect indicates whether anyone can upload files to a publicly shared URL + EnableCollect bool `json:"enableCollect"` + PasswordEnabled bool `json:"passwordEnabled"` + // Nonce contains the nonce value for the password if the link is password protected. + Nonce *string `json:"nonce,omitempty"` + MemLimit *int64 `json:"memLimit,omitempty"` + OpsLimit *int64 `json:"opsLimit,omitempty"` +} + +type PublicAccessContext struct { + ID int64 + IP string + UserAgent string + CollectionID int64 +} + +// PublicCollectionSummary represents information about a public collection +type PublicCollectionSummary struct { + ID int64 + CollectionID int64 + IsDisabled bool + ValidTill int64 + DeviceLimit int + CreatedAt int64 + UpdatedAt int64 + DeviceAccessCount int + // A non-empty passHash indicates that the link is password protected. + PassHash *string +} + +type AbuseReportRequest struct { + URL string `json:"url" binding:"required"` + Reason string `json:"reason" binding:"required"` + Details AbuseReportDetails `json:"details" binding:"required"` +} + +type AbuseReportDetails struct { + FullName string `json:"fullName" binding:"required"` + Email string `json:"email" binding:"required"` + Signature string `json:"signature" binding:"required"` + Comment string `json:"comment"` + OnBehalfOf string `json:"onBehalfOf"` + JobTitle string `json:"jobTitle"` + Address *ReporterAddress `json:"address"` +} + +type ReporterAddress struct { + Stress string `json:"street" binding:"required"` + City string `json:"city" binding:"required"` + State string `json:"state" binding:"required"` + Country string `json:"country" binding:"required"` + PostalCode string `json:"postalCode" binding:"required"` + Phone string `json:"phone" binding:"required"` +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (ca AbuseReportDetails) Value() (driver.Value, error) { + return json.Marshal(ca) +} + +// Scan implements the sql.Scanner interface. This method +// simply decodes a JSON-encoded value into the struct fields. +func (ca *AbuseReportDetails) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + + return json.Unmarshal(b, &ca) +} + +// Value implements the driver.Valuer interface. This method +// simply returns the JSON-encoded representation of the struct. +func (ca ReporterAddress) Value() (driver.Value, error) { + return json.Marshal(ca) +} + +// Scan implements the sql.Scanner interface. This method +// simply decodes a JSON-encoded value into the struct fields.
+func (ca *ReporterAddress) Scan(value interface{}) error { + b, ok := value.([]byte) + if !ok { + return stacktrace.NewError("type assertion to []byte failed") + } + + return json.Unmarshal(b, &ca) +} diff --git a/server/ente/push.go b/server/ente/push.go new file mode 100644 index 000000000..adcb877ef --- /dev/null +++ b/server/ente/push.go @@ -0,0 +1,34 @@ +package ente + +import ( + "encoding/json" + "time" +) + +// PushTokenRequest represents a push token +type PushTokenRequest struct { + FCMToken string `json:"fcmToken" binding:"required"` + APNSToken string `json:"apnsToken"` + LastNotificationTime int64 +} + +type PushToken struct { + UserID int64 `json:"userID"` + FCMToken string `json:"fcmToken"` + CreatedAt int64 `json:"createdAt"` + LastNotifiedAt int64 `json:"lastNotifiedAt"` +} + +func (pt *PushToken) MarshalJSON() ([]byte, error) { + return json.Marshal(&struct { + UserID int64 `json:"userID"` + TrimmedToken string `json:"trimmedToken"` + CreatedAt string `json:"createdAt"` + LastNotifiedAt string `json:"LastNotifiedAt"` + }{ + UserID: pt.UserID, + TrimmedToken: pt.FCMToken[0:9], + CreatedAt: time.Unix(pt.CreatedAt/1000000, 0).String(), + LastNotifiedAt: time.Unix(pt.LastNotifiedAt/1000000, 0).String(), + }) +} diff --git a/server/ente/remotestore.go b/server/ente/remotestore.go new file mode 100644 index 000000000..02eb93232 --- /dev/null +++ b/server/ente/remotestore.go @@ -0,0 +1,15 @@ +package ente + +type GetValueRequest struct { + Key string `form:"key" binding:"required"` + DefaultValue *string `form:"defaultValue"` +} + +type GetValueResponse struct { + Value string `json:"value" binding:"required"` +} + +type UpdateKeyValueRequest struct { + Key string `json:"key" binding:"required"` + Value string `json:"value" binding:"required"` +} diff --git a/server/ente/srp.go b/server/ente/srp.go new file mode 100644 index 000000000..b1d533443 --- /dev/null +++ b/server/ente/srp.go @@ -0,0 +1,100 @@ +package ente + +import ( + "github.com/google/uuid" +) + +type SetupSRPRequest struct { + SrpUserID uuid.UUID `json:"srpUserID" binding:"required"` + SRPSalt string `json:"srpSalt" binding:"required"` + SRPVerifier string `json:"srpVerifier" binding:"required"` + SRPA string `json:"srpA" binding:"required"` +} + +type SetupSRPResponse struct { + SetupID uuid.UUID `json:"setupID" binding:"required"` + SRPB string `json:"srpB" binding:"required"` +} + +type CompleteSRPSetupRequest struct { + SetupID uuid.UUID `json:"setupID" binding:"required"` + SRPM1 string `json:"srpM1" binding:"required"` +} + +type CompleteSRPSetupResponse struct { + SetupID uuid.UUID `json:"setupID" binding:"required"` + SRPM2 string `json:"srpM2" binding:"required"` +} + +// UpdateSRPAndKeysRequest is used to update the SRP attributes (e.g. 
when the user updates their password) and also +// update the key attributes +type UpdateSRPAndKeysRequest struct { + SetupID uuid.UUID `json:"setupID" binding:"required"` + SRPM1 string `json:"srpM1" binding:"required"` + UpdateAttributes *UpdateKeysRequest `json:"updatedKeyAttr"` + LogOutOtherDevices *bool `json:"logOutOtherDevices"` +} + +type UpdateSRPSetupResponse struct { + SetupID uuid.UUID `json:"setupID" binding:"required"` + SRPM2 string `json:"srpM2" binding:"required"` +} + +type GetSRPAttributesRequest struct { + Email string `form:"email" binding:"required"` +} + +type GetSRPAttributesResponse struct { + SRPUserID string `json:"srpUserID" binding:"required"` + SRPSalt string `json:"srpSalt" binding:"required"` + // MemLimit, OpsLimit and KekSalt are needed to derive the KeyEncryptionKey + // on the client. The client generates the LoginKey from the KeyEncryptionKey + // and treats that as the UserInputPassword. + MemLimit int `json:"memLimit" binding:"required"` + OpsLimit int `json:"opsLimit" binding:"required"` + KekSalt string `json:"kekSalt" binding:"required"` + IsEmailMFAEnabled bool `json:"isEmailMFAEnabled" binding:"required"` +} + +type CreateSRPSessionRequest struct { + SRPUserID uuid.UUID `json:"srpUserID" binding:"required"` + SRPA string `json:"srpA" binding:"required"` +} + +type CreateSRPSessionResponse struct { + SessionID uuid.UUID `json:"sessionID" binding:"required"` + SRPB string `json:"srpB" binding:"required"` +} + +type VerifySRPSessionRequest struct { + SessionID uuid.UUID `json:"sessionID" binding:"required"` + SRPUserID uuid.UUID `json:"srpUserID" binding:"required"` + SRPM1 string `json:"srpM1"` +} + +// SRPSessionEntity represents a row in the srp_sessions table +type SRPSessionEntity struct { + ID uuid.UUID + SRPUserID uuid.UUID + UserID int64 + ServerKey string + SRP_A string + IsVerified bool + AttemptCount int32 +} + +type SRPAuthEntity struct { + UserID int64 + SRPUserID uuid.UUID + Salt string + Verifier string +} + +type SRPSetupEntity struct { + ID uuid.UUID + SessionID uuid.UUID + SRPUserID uuid.UUID + UserID int64 + Salt string + Verifier string +} diff --git a/server/ente/storagebonus/errors.go b/server/ente/storagebonus/errors.go new file mode 100644 index 000000000..3312a8c52 --- /dev/null +++ b/server/ente/storagebonus/errors.go @@ -0,0 +1,39 @@ +package storagebonus + +import ( + "net/http" + + "github.com/ente-io/museum/ente" +) + +const ( + invalid ente.ErrorCode = "INVALID_CODE" + codeApplied ente.ErrorCode = "CODE_ALREADY_APPLIED" + codeExists ente.ErrorCode = "CODE_ALREADY_EXISTS" + accountNotEligible ente.ErrorCode = "ACCOUNT_NOT_ELIGIBLE" +) + +// InvalidCodeErr is thrown when a user gives a code which either doesn't exist or belongs to a now-deleted user +var InvalidCodeErr = &ente.ApiError{ + Code: invalid, + Message: "Invalid code", + HttpStatusCode: http.StatusNotFound, +} + +var CodeAlreadyAppliedErr = &ente.ApiError{ + Code: codeApplied, + Message: "User has already applied code", + HttpStatusCode: http.StatusConflict, +} + +var CanNotApplyCodeErr = &ente.ApiError{ + Code: accountNotEligible, + Message: "User is not eligible to apply referral code", + HttpStatusCode: http.StatusBadRequest, +} + +var CodeAlreadyExistsErr = &ente.ApiError{ + Code: codeExists, + Message: "This code already exists", + HttpStatusCode: http.StatusBadRequest, +} diff --git a/server/ente/storagebonus/referral.go b/server/ente/storagebonus/referral.go new file mode 100644 index 000000000..816e0ca4e --- /dev/null +++ b/server/ente/storagebonus/referral.go @@ -0,0 +1,54
@@ +package storagebonus + +// Tracking represents the entity used to track referral history +type Tracking struct { + // UserID of the user who invited the other person + Invitor int64 + // UserID of the user who was invited by the invitor + Invitee int64 + // CreatedAt time when the user applied the code + CreatedAt int64 + + PlanType PlanType +} + +type UserReferralPlanStat struct { + PlanType PlanType `json:"planType"` + TotalCount int `json:"totalCount"` + UpgradedCount int `json:"upgradedCount"` +} + +// PlanInfo represents the referral plan metadata +type PlanInfo struct { + // IsEnabled indicates if the referral plan is enabled for the given user + IsEnabled bool `json:"isEnabled"` + // Referral plan type + PlanType PlanType `json:"planType"` + // Storage which can be gained on a successful referral + StorageInGB int64 `json:"storageInGB"` + // Max storage which can be claimed by the user + MaxClaimableStorageInGB int64 `json:"maxClaimableStorageInGB"` +} + +type GetStorageBonusDetailResponse struct { + ReferralStats []UserReferralPlanStat `json:"referralStats"` + Bonuses []StorageBonus `json:"bonuses"` + RefCount int `json:"refCount"` + RefUpgradeCount int `json:"refUpgradeCount"` + // Indicates if the user applied a code during signup + HasAppliedCode bool `json:"hasAppliedCode"` +} + +// GetUserReferralView represents the basic view of the user's referral plan. +// This is used to show the user's referral details in the UI +type GetUserReferralView struct { + PlanInfo PlanInfo `json:"planInfo"` + Code *string `json:"code"` + // Indicates if the user can apply the referral code. + EnableApplyCode bool `json:"enableApplyCode"` + HasAppliedCode bool `json:"hasAppliedCode"` + // Indicates claimed referral storage + ClaimedStorage int64 `json:"claimedStorage"` + // Indicates if the user is part of a family and is the admin + IsFamilyMember bool `json:"isFamilyMember"` +} diff --git a/server/ente/storagebonus/referral_type.go b/server/ente/storagebonus/referral_type.go new file mode 100644 index 000000000..15afc3a9e --- /dev/null +++ b/server/ente/storagebonus/referral_type.go @@ -0,0 +1,46 @@ +package storagebonus + +import ( + "fmt" +) + +type PlanType string + +const ( + // TenGbOnUpgrade is the plan where both parties get 10 GB of surplus storage.
+ // The invitee gets 10 GB storage on successful signup + // The invitor gets 10 GB storage only after the invitee upgrades to a paid plan + TenGbOnUpgrade PlanType = "10_GB_ON_UPGRADE" +) + +// SignUpInviteeBonus returns the storage which can be gained by the invitee on successful signup with a referral code +func (c PlanType) SignUpInviteeBonus() int64 { + switch c { + case TenGbOnUpgrade: + return 10 * 1024 * 1024 * 1024 + default: + panic(fmt.Sprintf("SignUpInviteeBonus value not configured for %s", c)) + } +} + +// SignUpInvitorBonus returns the storage which can be gained by the invitor when someone signs up using their code +func (c PlanType) SignUpInvitorBonus() int64 { + switch c { + case TenGbOnUpgrade: + return 0 + default: + // panic if the plan type is not supported + panic("unsupported plan type") + } +} + +// InvitorBonusOnInviteeUpgrade returns the storage which can be gained by the invitor when the invitee upgrades to a paid plan +func (c PlanType) InvitorBonusOnInviteeUpgrade() int64 { + switch c { + case TenGbOnUpgrade: + return 10 * 1024 * 1024 * 1024 + default: + // panic if the plan type is not supported + panic("unsupported plan type") + } +} diff --git a/server/ente/storagebonus/storge_bonus.go b/server/ente/storagebonus/storge_bonus.go new file mode 100644 index 000000000..9a876fb4d --- /dev/null +++ b/server/ente/storagebonus/storge_bonus.go @@ -0,0 +1,129 @@ +package storagebonus + +type BonusType string + +const ( + // Referral bonus is gained by inviting others + Referral BonusType = "REFERRAL" + // SignUp is for applying a code shared by others during sign up + // Note: In the future, for surplus types which should be only applied once, we can add unique constraints + SignUp BonusType = "SIGN_UP" + + // AddOnSupport is the bonus for users added by the support team + AddOnSupport = "ADD_ON_SUPPORT" + // AddOnBf2023 is the bonus for users who have opted for the Black Friday 2023 offer + AddOnBf2023 = "ADD_ON_BF_2023" + // In the future, we can add various types of bonuses based on different events like Anniversary, + // or finishing tasks like ML indexing, enabling sharing, etc. +) + +// PaidAddOnTypes: These add-ons can be purchased by the users and help extend the expiry of an account +// as long as the add-on is active. +var PaidAddOnTypes = []BonusType{AddOnSupport, AddOnBf2023} + +// ExtendsExpiry returns true if the bonus type extends the expiry of the account. +// By default, all bonuses don't extend expiry. +func (t BonusType) ExtendsExpiry() bool { + switch t { + case AddOnSupport, AddOnBf2023: + return true + case Referral, SignUp: + return false + default: + return false + } +} + +// RestrictToDoublingStorage returns true if the bonus type restricts the doubling of storage. +// This indicates that the usable bonus storage should not exceed the current plan storage. +// Note: Current plan storage includes both the base subscription and any storage bonus that ExtendsExpiry +func (t BonusType) RestrictToDoublingStorage() bool { + switch t { + case Referral, SignUp: + return true + case AddOnSupport, AddOnBf2023: + return false + default: + return true + } +}
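The two predicates partition the bonus types: referral-style bonuses cap at double the plan storage but never extend account expiry, while paid add-ons do the opposite. A quick check, as a standalone sketch:

```go
package main

import (
	"fmt"

	"github.com/ente-io/museum/ente/storagebonus"
)

func main() {
	types := []storagebonus.BonusType{
		storagebonus.Referral,
		storagebonus.SignUp,
		storagebonus.AddOnSupport,
		storagebonus.AddOnBf2023,
	}
	for _, t := range types {
		// REFERRAL/SIGN_UP print: false true; ADD_ON_* print: true false
		fmt.Println(t, t.ExtendsExpiry(), t.RestrictToDoublingStorage())
	}
}
```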
+ +type RevokeReason string + +const ( + Fraud RevokeReason = "FRAUD" + // Expired is usually used to take away a one-time bonus. + Expired RevokeReason = "EXPIRED" + // Discontinued is used when a storage bonus is taken away, e.g. because the other user deleted their account or + // stopped their subscription, or the user decided to pause their subscription after an anniversary gift + Discontinued RevokeReason = "DISCONTINUED" +) + +type StorageBonus struct { + UserID int64 `json:"-"` + // Amount of storage bonus added to the account + Storage int64 `json:"storage"` + Type BonusType `json:"type"` + CreatedAt int64 `json:"createdAt"` + UpdatedAt int64 `json:"-"` + // ValidTill represents the validity of the storage bonus. If it is 0, it is valid forever. + ValidTill int64 `json:"validTill"` + RevokeReason *RevokeReason `json:"-"` + IsRevoked bool `json:"isRevoked"` +} + +type ActiveStorageBonus struct { + StorageBonuses []StorageBonus `json:"storageBonuses"` +} + +func (a *ActiveStorageBonus) GetMaxExpiry() int64 { + if a == nil { + return 0 + } + maxExpiry := int64(0) + for _, bonus := range a.StorageBonuses { + if bonus.Type.ExtendsExpiry() && bonus.ValidTill > maxExpiry { + maxExpiry = bonus.ValidTill + } + } + return maxExpiry +} + +func (a *ActiveStorageBonus) GetReferralBonus() int64 { + if a == nil { + return 0 + } + referralBonus := int64(0) + for _, bonus := range a.StorageBonuses { + if bonus.Type.RestrictToDoublingStorage() { + referralBonus += bonus.Storage + } + } + return referralBonus +} + +func (a *ActiveStorageBonus) GetAddonStorage() int64 { + if a == nil { + return 0 + } + addonStorage := int64(0) + for _, bonus := range a.StorageBonuses { + if !bonus.Type.RestrictToDoublingStorage() { + addonStorage += bonus.Storage + } + } + return addonStorage +} + +func (a *ActiveStorageBonus) GetUsableBonus(subStorage int64) int64 { + refBonus := a.GetReferralBonus() + totalSubAndAddOnStorage := a.GetAddonStorage() + subStorage + if refBonus > totalSubAndAddOnStorage { + refBonus = totalSubAndAddOnStorage + } + return a.GetAddonStorage() + refBonus +} + +type GetBonusResult struct { + StorageBonuses []StorageBonus +}
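A worked example of the doubling rule, with hypothetical numbers: on a 100 GB subscription with a 50 GB add-on and a 200 GB referral bonus, the referral part is capped at subscription + add-on (150 GB), so the usable bonus comes out to 200 GB:

```go
package main

import (
	"fmt"

	"github.com/ente-io/museum/ente/storagebonus"
)

func main() {
	const gb = int64(1024 * 1024 * 1024)
	bonus := &storagebonus.ActiveStorageBonus{
		StorageBonuses: []storagebonus.StorageBonus{
			{Type: storagebonus.Referral, Storage: 200 * gb},
			{Type: storagebonus.AddOnSupport, Storage: 50 * gb},
		},
	}
	// referral (200) is capped at sub (100) + add-on (50) = 150; plus the
	// add-on's own 50 that gives a usable bonus of 200
	fmt.Println(bonus.GetUsableBonus(100*gb) / gb) // 200
}
```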
diff --git a/server/ente/trash.go b/server/ente/trash.go new file mode 100644 index 000000000..52bb2c758 --- /dev/null +++ b/server/ente/trash.go @@ -0,0 +1,47 @@ +package ente + +// Trash indicates a trashed file in the system. +type Trash struct { + File File `json:"file"` + IsDeleted bool `json:"isDeleted"` + IsRestored bool `json:"isRestored"` + DeleteBy int64 `json:"deleteBy"` + CreatedAt int64 `json:"createdAt"` + UpdatedAt int64 `json:"updatedAt"` +} + +// DeleteTrashFilesRequest represents a request to delete trashed files +type DeleteTrashFilesRequest struct { + FileIDs []int64 `json:"fileIDs" binding:"required"` + // OwnerID will be set based on the authenticated user + OwnerID int64 +} + +// EmptyTrashRequest represents a request to empty items from the user's trash +type EmptyTrashRequest struct { + // LastUpdatedAt timestamp will be used to delete trashed files with updatedAt timestamp <= LastUpdatedAt + // User's trash will be cleaned up in an async manner. The timestamp is used to ensure that newly trashed files + // are not deleted due to delay in the async operation. + LastUpdatedAt int64 `json:"lastUpdatedAt" binding:"required"` +} + +// TrashCollectionV3Request represents the request for trashing/deleting a collection. +// In V3, while trashing/deleting any album, the user can decide to either keep or delete the files present in it; +// deleted files are moved to the trash. When the user wants to keep the files, the clients are expected to move all +// the files from the underlying collection to any other collection owned by the user, including Uncategorized. +// Note: Collection Delete Versions for the DELETE /collections/V../ endpoint +// V1: All files which exclusively belong to the collection are deleted immediately. +// V2: All files which exclusively belong to the collection are moved to the trash. +// V3: All files which are still present in the collection (irrespective of whether they belong to another collection) will be moved to the trash. +// V3 was introduced to avoid doing this bookkeeping on the server, where we would only delete a file when it is being removed from the last collection it belongs to. +// In theory, the logic to delete a file when it is being removed from its last collection sounds good. But, +// in practice it complicates the code (thus reducing its robustness) because of race conditions, and it's +// also hard to communicate to the user. So, to simplify things, in V3 the files are only deleted when the user tells us to delete them. +type TrashCollectionV3Request struct { + CollectionID int64 `json:"collectionID" form:"collectionID" binding:"required"` + // When KeepFiles is false, then all the files which are present in the collection will be moved to the trash. + // When KeepFiles is true, but the underlying collection still contains files, then the API call will fail. + // This is to ensure that before deleting the collection, the client has moved all relevant files to any other + // collection owned by the user, including Uncategorized. + KeepFiles *bool `json:"keepFiles" form:"keepFiles" binding:"required"` +} diff --git a/server/ente/user.go b/server/ente/user.go new file mode 100644 index 000000000..5d80dc983 --- /dev/null +++ b/server/ente/user.go @@ -0,0 +1,213 @@ +package ente + +const ( + PhotosOTTTemplate = "ott_photos.html" + + AuthOTTTemplate = "ott_auth.html" + + ChangeEmailOTTTemplate = "ott_change_email.html" + EmailChangedTemplate = "email_changed.html" + EmailChangedSubject = "Email address updated" + + // OTTEmailSubject is the subject of the OTT mail + OTTEmailSubject = "ente Verification Code" + + ChangeEmailOTTPurpose = "change" +) + +// User represents a user in the system +type User struct { + ID int64 + Email string `json:"email"` + Name string `json:"name"` + Hash string `json:"hash"` + CreationTime int64 `json:"creationTime"` + FamilyAdminID *int64 `json:"familyAdminID"` + IsTwoFactorEnabled *bool `json:"isTwoFactorEnabled"` + IsEmailMFAEnabled *bool `json:"isEmailMFAEnabled"` +} + +// SendOTTRequest is a request to generate and send a verification code (OTT) +type SendOTTRequest struct { + Email string `json:"email"` + Client string `json:"client"` + Purpose string `json:"purpose"` +} + +// EmailVerificationRequest represents an email verification request +type EmailVerificationRequest struct { + Email string `json:"email"` + OTT string `json:"ott"` + // Indicates the source from where the user heard about the service + Source *string `json:"source"` +} + +type EmailVerificationResponse struct { + ID int64 `json:"id"` + Token string `json:"token"` + KeyAttributes KeyAttributes `json:"keyAttributes"` + Subscription Subscription `json:"subscription"` +} + +// EmailAuthorizationResponse represents the response after the user has verified their email; +// if two factor is enabled just `TwoFactorSessionID` is sent, else the keyAttributes and encryptedToken +type EmailAuthorizationResponse struct { + ID int64 `json:"id"` + KeyAttributes *KeyAttributes
`json:"keyAttributes,omitempty"` + EncryptedToken string `json:"encryptedToken,omitempty"` + Token string `json:"token,omitempty"` + PasskeySessionID string `json:"passkeySessionID"` + TwoFactorSessionID string `json:"twoFactorSessionID"` + // SrpM2 is sent only if the user is logging via SRP + // SrpM2 is the SRP M2 value aka the proof that the server has the verifier + SrpM2 *string `json:"srpM2,omitempty"` +} + +// KeyAttributes stores the key related attributes for a user +type KeyAttributes struct { + KEKSalt string `json:"kekSalt" binding:"required"` + KEKHash string `json:"kekHash"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + KeyDecryptionNonce string `json:"keyDecryptionNonce" binding:"required"` + PublicKey string `json:"publicKey" binding:"required"` + EncryptedSecretKey string `json:"encryptedSecretKey" binding:"required"` + SecretKeyDecryptionNonce string `json:"secretKeyDecryptionNonce" binding:"required"` + MemLimit int `json:"memLimit" binding:"required"` + OpsLimit int `json:"opsLimit" binding:"required"` + MasterKeyEncryptedWithRecoveryKey string `json:"masterKeyEncryptedWithRecoveryKey"` + MasterKeyDecryptionNonce string `json:"masterKeyDecryptionNonce"` + RecoveryKeyEncryptedWithMasterKey string `json:"recoveryKeyEncryptedWithMasterKey"` + RecoveryKeyDecryptionNonce string `json:"recoveryKeyDecryptionNonce"` +} + +// SetUserAttributesRequest represents an incoming request to set UA +type SetUserAttributesRequest struct { + KeyAttributes KeyAttributes `json:"keyAttributes" binding:"required"` +} + +// UpdateEmailMFA .. +type UpdateEmailMFA struct { + IsEnabled *bool `json:"isEnabled" binding:"required"` +} + +// UpdateKeysRequest represents a request to set user keys +type UpdateKeysRequest struct { + KEKSalt string `json:"kekSalt" binding:"required"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + KeyDecryptionNonce string `json:"keyDecryptionNonce" binding:"required"` + MemLimit int `json:"memLimit" binding:"required"` + OpsLimit int `json:"opsLimit" binding:"required"` +} + +type SetRecoveryKeyRequest struct { + MasterKeyEncryptedWithRecoveryKey string `json:"masterKeyEncryptedWithRecoveryKey"` + MasterKeyDecryptionNonce string `json:"masterKeyDecryptionNonce"` + RecoveryKeyEncryptedWithMasterKey string `json:"recoveryKeyEncryptedWithMasterKey"` + RecoveryKeyDecryptionNonce string `json:"recoveryKeyDecryptionNonce"` +} + +type EventReportRequest struct { + Event string `json:"event"` +} + +type EncryptionResult struct { + Cipher []byte + Nonce []byte +} + +type DeleteChallengeResponse struct { + // AllowDelete indicates whether the user is allowed to delete their account via app + AllowDelete bool `json:"allowDelete"` + EncryptedChallenge *string `json:"encryptedChallenge,omitempty"` +} + +type DeleteAccountRequest struct { + Challenge string `json:"challenge"` + Feedback *string `json:"feedback"` + ReasonCategory *string `json:"reasonCategory"` + Reason *string `json:"reason"` +} + +func (r *DeleteAccountRequest) GetReasonAttr() map[string]string { + result := make(map[string]string) + // Note: mobile client is sending reasonCategory, but web/desktop is sending reason + if r.ReasonCategory != nil { + result["reason"] = *r.ReasonCategory + } + if r.Reason != nil { + result["reason"] = *r.Reason + } + if r.Feedback != nil { + result["feedback"] = *r.Feedback + } + return result +} + +type DeleteAccountResponse struct { + IsSubscriptionCancelled bool `json:"isSubscriptionCancelled"` + UserID int64 `json:"userID"` +} + +// 
+ +// TwoFactorSecret represents the two factor secret generator value that the user enters in their authenticator app +type TwoFactorSecret struct { + SecretCode string `json:"secretCode"` + QRCode string `json:"qrCode"` +} + +// TwoFactorEnableRequest represents the user request to enable two factor after the initial setup +type TwoFactorEnableRequest struct { + Code string `json:"code"` + EncryptedTwoFactorSecret string `json:"encryptedTwoFactorSecret"` + TwoFactorSecretDecryptionNonce string `json:"twoFactorSecretDecryptionNonce"` +} + +// TwoFactorVerificationRequest represents a two factor verification request +type TwoFactorVerificationRequest struct { + SessionID string `json:"sessionID" binding:"required"` + Code string `json:"code" binding:"required"` +} + +// PasskeyTwoFactorBeginAuthenticationCeremonyRequest represents the request to begin the passkey authentication ceremony +type PasskeyTwoFactorBeginAuthenticationCeremonyRequest struct { + SessionID string `json:"sessionID" binding:"required"` +} + +type PasskeyTwoFactorFinishAuthenticationCeremonyRequest struct { + SessionID string `form:"sessionID" binding:"required"` + CeremonySessionID string `form:"ceremonySessionID" binding:"required"` +} + +// TwoFactorAuthorizationResponse represents the response after two factor authentication +type TwoFactorAuthorizationResponse struct { + ID int64 `json:"id"` + KeyAttributes *KeyAttributes `json:"keyAttributes,omitempty"` + EncryptedToken string `json:"encryptedToken,omitempty"` +} + +// TwoFactorRecoveryResponse represents the two factor secret encrypted with the user's recovery key, sent so that the user can make a removal request +type TwoFactorRecoveryResponse struct { + EncryptedSecret string `json:"encryptedSecret"` + SecretDecryptionNonce string `json:"secretDecryptionNonce"` +} + +// TwoFactorRemovalRequest represents the body of a two factor removal request, consisting of the decrypted two factor secret and sessionID +type TwoFactorRemovalRequest struct { + Secret string `json:"secret"` + SessionID string `json:"sessionID"` +} + +type ProfileData struct { + // CanDisableEmailMFA is used to decide if the client should show the disable email MFA option + CanDisableEmailMFA bool `json:"canDisableEmailMFA"` + IsEmailMFAEnabled bool `json:"isEmailMFAEnabled"` + IsTwoFactorEnabled bool `json:"isTwoFactorEnabled"` +} + +type Session struct { + Token string `json:"token"` + CreationTime int64 `json:"creationTime"` + IP string `json:"ip"` + UA string `json:"ua"` + PrettyUA string `json:"prettyUA"` + LastUsedTime int64 `json:"lastUsedTime"` +}
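The SecretCode in TwoFactorSecret is a standard TOTP seed, so verification can lean on github.com/pquerna/otp, which is already in go.mod. A hypothetical helper:

```go
package auth

import "github.com/pquerna/otp/totp"

// isValidTOTPCode checks a user-submitted two factor code against the
// stored TOTP secret for the current time window.
func isValidTOTPCode(code, secret string) bool {
	return totp.Validate(code, secret)
}
```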
binding:"required"` + CreatedAt int64 `json:"createdAt" binding:"required"` + UpdatedAt int64 `json:"updatedAt" binding:"required"` +} + +// EntityKeyRequest represents a request to create entity data encryption key for a given EntityType +type EntityKeyRequest struct { + Type EntityType `json:"type" binding:"required"` + EncryptedKey string `json:"encryptedKey" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// GetEntityKeyRequest represents a request to get entity key for given EntityType +type GetEntityKeyRequest struct { + Type EntityType `form:"type" binding:"required"` +} + +// EntityDataRequest is used to create a new entity data of given EntityType +type EntityDataRequest struct { + Type EntityType `json:"type" binding:"required"` + EncryptedData string `json:"encryptedData" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// UpdateEntityDataRequest updates the current entity +type UpdateEntityDataRequest struct { + ID uuid.UUID `json:"id" binding:"required"` + Type EntityType `json:"type" binding:"required"` + EncryptedData string `json:"encryptedData" binding:"required"` + Header string `json:"header" binding:"required"` +} + +// GetEntityDiffRequest returns the diff of entities since the given time +type GetEntityDiffRequest struct { + Type EntityType `form:"type" binding:"required"` + // SinceTime *int64. Pointer allows us to pass 0 value otherwise binding fails for zero Value. + SinceTime *int64 `form:"sinceTime" binding:"required"` + Limit int16 `form:"limit" binding:"required"` +} diff --git a/server/ente/webauthnSession.go b/server/ente/webauthnSession.go new file mode 100644 index 000000000..ed4bcab3c --- /dev/null +++ b/server/ente/webauthnSession.go @@ -0,0 +1,63 @@ +package ente + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" + + "github.com/ente-io/museum/pkg/utils/byteMarshaller" + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + "github.com/google/uuid" +) + +// WebAuthnSession is a protocol level session that stores challenges and other metadata during registration and login ceremonies +type WebAuthnSession struct { + ID uuid.UUID + + Challenge string + + UserID int64 + + AllowedCredentialIDs string // [][]byte as b64 + + ExpiresAt int64 + + UserVerificationRequirement string + + Extensions string // map[string]interface{} as json + + CreatedAt int64 +} + +func (w *WebAuthnSession) SessionData() (session *webauthn.SessionData, err error) { + buf := new(bytes.Buffer) + err = binary.Write(buf, binary.BigEndian, w.UserID) + if err != nil { + return + } + + allowedCredentialIDs, err := byteMarshaller.DecodeString(w.AllowedCredentialIDs) + if err != nil { + return + } + + extensions := map[string]interface{}{} + err = json.Unmarshal([]byte(w.Extensions), &extensions) + if err != nil { + return + } + + session = &webauthn.SessionData{ + Challenge: w.Challenge, + UserID: buf.Bytes(), + AllowedCredentialIDs: allowedCredentialIDs, + Expires: time.UnixMicro(w.ExpiresAt), + + UserVerification: protocol.UserVerificationRequirement(w.UserVerificationRequirement), + Extensions: extensions, + } + + return +} diff --git a/server/go.mod b/server/go.mod new file mode 100644 index 000000000..d034a02b5 --- /dev/null +++ b/server/go.mod @@ -0,0 +1,123 @@ +module github.com/ente-io/museum + +go 1.20 + +require ( + firebase.google.com/go v3.13.0+incompatible + github.com/GoKillers/libsodium-go v0.0.0-20171022220152-dd733721c3cb + github.com/avct/uasurfer 
v0.0.0-20191028135549-26b5daa857f1 + github.com/awa/go-iap v1.3.16 + github.com/aws/aws-sdk-go v1.34.13 + github.com/bwmarrin/discordgo v0.25.0 + github.com/dlmiddlecote/sqlstats v1.0.2 + github.com/ente-io/stacktrace v0.0.0-20210619050357-0af9fad4639c + github.com/gin-contrib/gzip v0.0.5 + github.com/gin-contrib/requestid v0.0.2-0.20210619060739-3f23d9a07dc5 + github.com/gin-contrib/timeout v0.0.3 + github.com/gin-gonic/gin v1.9.1 + github.com/go-playground/validator/v10 v10.14.0 + github.com/golang-jwt/jwt v3.2.1+incompatible + github.com/golang-migrate/migrate/v4 v4.12.2 + github.com/google/go-cmp v0.6.0 + github.com/google/uuid v1.4.0 + github.com/kong/go-srp v0.0.0-20191210190804-cde1efa3c083 + github.com/lib/pq v1.8.0 + github.com/lithammer/shortuuid/v3 v3.0.4 + github.com/patrickmn/go-cache v2.1.0+incompatible + github.com/pquerna/otp v1.3.0 + github.com/prometheus/client_golang v1.11.1 + github.com/prometheus/common v0.26.0 + github.com/robfig/cron/v3 v3.0.1 + github.com/sirupsen/logrus v1.6.0 + github.com/spf13/viper v1.8.1 + github.com/stretchr/testify v1.8.4 + github.com/stripe/stripe-go/v72 v72.37.0 + github.com/ua-parser/uap-go v0.0.0-20211112212520-00c877edfe0f + github.com/ulule/limiter/v3 v3.8.0 + github.com/zsais/go-gin-prometheus v0.1.0 + golang.org/x/crypto v0.17.0 + golang.org/x/sync v0.1.0 + golang.org/x/text v0.14.0 + google.golang.org/api v0.114.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 +) + +require ( + cloud.google.com/go/compute/metadata v0.2.3 // indirect + cloud.google.com/go/longrunning v0.4.1 // indirect + github.com/bytedance/sonic v1.9.1 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // indirect + github.com/go-webauthn/x v0.1.5 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.0 // indirect + github.com/google/go-tpm v0.9.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/time v0.1.0 // indirect +) + +require ( + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.19.1 // indirect + cloud.google.com/go/firestore v1.9.0 // indirect + cloud.google.com/go/iam v0.13.0 // indirect + cloud.google.com/go/storage v1.28.1 // indirect + github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 // indirect + github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.4.9 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-webauthn/webauthn v0.9.4 + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/protobuf v1.5.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/gorilla/websocket v1.4.2 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + 
github.com/hashicorp/go-multierror v1.1.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/jmespath/go-jmespath v0.3.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/magiconair/properties v1.8.5 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.3 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/procfs v0.6.0 // indirect + github.com/spf13/afero v1.6.0 // indirect + github.com/spf13/cast v1.3.1 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/subosito/gotenv v1.2.0 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + go.opencensus.io v0.24.0 // indirect + golang.org/x/net v0.17.0 // indirect + golang.org/x/oauth2 v0.7.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + google.golang.org/appengine v1.6.7 // indirect + google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/grpc v1.56.3 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/alecthomas/kingpin.v2 v2.2.6 // indirect + gopkg.in/ini.v1 v1.62.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/server/go.sum b/server/go.sum new file mode 100644 index 000000000..46783cfea --- /dev/null +++ b/server/go.sum @@ -0,0 +1,1097 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= +cloud.google.com/go v0.61.0/go.mod h1:XukKJg4Y7QsUu0Hxg3qQKUWR4VuWivmyMK2+rUyxAqw= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod 
h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.19.1 h1:am86mquDUgjGNWxiGn+5PGLbmgiWXlE/yNWpIpNvuXY= +cloud.google.com/go/compute v1.19.1/go.mod h1:6ylj3a05WF8leseCdIf77NK0g1ey+nj5IKd5/kvShxE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.9.0 h1:IBlRyxgGySXu5VuW0RgGFlTtLukSnNkpDiEOMkQkmpA= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= +cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/spanner v1.8.0/go.mod h1:mdAPDiFUbE9vCmhHHlxyDUtaPPsIK+pUdf5KmHaUfT8= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +firebase.google.com/go v3.13.0+incompatible 
h1:3TdYC3DDi6aHn20qoRkxwGqNgdjtblwVAyRLQwGn/+4= +firebase.google.com/go v3.13.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/ClickHouse/clickhouse-go v1.3.12/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI= +github.com/GoKillers/libsodium-go v0.0.0-20171022220152-dd733721c3cb h1:ilqSFSbR1fq6x88heeHrvAqlg+ES+tZk2ZcaCmiH1gI= +github.com/GoKillers/libsodium-go v0.0.0-20171022220152-dd733721c3cb/go.mod h1:72TQeEkiDH9QMXZa5nJJvZre0UjqqO67X2QEIoOwCRU= +github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= +github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= +github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.0.0/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/arrow v0.0.0-20200601151325-b2287a20f230/go.mod h1:QNYViu/X0HXDHw7m3KXzWSVXIbfUvJqBFe6Gj8/pYA0= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1 h1:9h8f71kuF1pqovnn9h7LTHLEjxzyQaj0j1rQq5nsMM4= +github.com/avct/uasurfer v0.0.0-20191028135549-26b5daa857f1/go.mod h1:noBAuukeYOXa0aXGqxr24tADqkwDO2KRD15FsuaZ5a8= +github.com/awa/go-iap v1.3.16 h1:zl94AXQwY7qVUhdFQcmbuACsf3Denva243DfE+Cqs98= +github.com/awa/go-iap v1.3.16/go.mod h1:0lGM0xb6zSFa2lIPx2j/rrNIFFhLEF4/cy1h319olRM= +github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.34.13 h1:wwNWSUh4FGJxXVOVVNj2lWI8wTe5hK8sGWlK7ziEcgg= +github.com/aws/aws-sdk-go v1.34.13/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= 
+github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4= +github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc h1:biVzkmvwrH8WK8raXaxBx6fRVTlJILwEwQGL1I/ByEI= +github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/bwmarrin/discordgo v0.25.0 h1:NXhdfHRNxtwso6FPdzW2i3uBvvU7UIQTghmV2T4nqAs= +github.com/bwmarrin/discordgo v0.25.0/go.mod h1:NJZpH+1AfhIcyQsPeuBKsUtYrRnjkyu0kIVMCHkZtRY= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/cockroach-go v0.0.0-20190925194419-606b3d062051/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= +github.com/containerd/containerd v1.3.3 h1:LoIzb5y9x5l8VKAlyrbusNPXqBY0+kviRloxFUMFwKc= +github.com/containerd/containerd v1.3.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= 
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20200620013148-b91950f658ec/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= +github.com/dhui/dktest v0.3.2 h1:nZSDcnkpbotzT/nEHNsO+JCKY8i1Qoki1AYOpeLRb6M= +github.com/dhui/dktest v0.3.2/go.mod h1:l1/ib23a/CmxAe7yixtrYPc8Iy90Zy2udyaHINM5p58= +github.com/dlmiddlecote/sqlstats v1.0.2 h1:gSU11YN23D/iY50A2zVYwgXgy072khatTsIW6UPjUtI= +github.com/dlmiddlecote/sqlstats v1.0.2/go.mod h1:0CWaIh/Th+z2aI6Q9Jpfg/o21zmGxWhbByHgQSCUQvY= +github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= +github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209 h1:tmV+YbYOUAYDmAiamzhRKqQXaAUyUY2xVt27Rv7rCzA= +github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= +github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/ente-io/stacktrace v0.0.0-20210619050357-0af9fad4639c h1:85Bb8MlDOpKA8x2hmPd/pQnvtlcJcQPon+ocQAK17Fs= +github.com/ente-io/stacktrace v0.0.0-20210619050357-0af9fad4639c/go.mod h1:Kejqc+CuvGUwtgTAsYSDXeTPgAYilDbExwvibbGzmIg= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= 
+github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsouza/fake-gcs-server v1.17.0/go.mod h1:D1rTE4YCyHFNa99oyJJ5HyclvN/0uQR+pM/VdlL83bw= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/gzip v0.0.5 h1:mhnVU32YnnBh2LPH2iqRqsA/eR7SAqRaD388jL2s/j0= +github.com/gin-contrib/gzip v0.0.5/go.mod h1:OPIK6HR0Um2vNmBUTlayD7qle4yVVRZT0PyhdUigrKk= +github.com/gin-contrib/requestid v0.0.2-0.20210619060739-3f23d9a07dc5 h1:wcutRUQ6eEwEQ+NwQ/Qa/ziQv+Vdzn54q002jj6PR+o= +github.com/gin-contrib/requestid v0.0.2-0.20210619060739-3f23d9a07dc5/go.mod h1:kMVxxUiR0WHQvXMar6ozdUn4Dx9SltMNpIKBIc4AaAg= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-contrib/timeout v0.0.3 h1:ysZQ7kChgqlzBkuLgwTTDjTPP2uqdI68XxRyqIFK68g= +github.com/gin-contrib/timeout v0.0.3/go.mod h1:F3fjkmFc4I1QdF7MyVwtO6ZkPueBckNoiOVpU73HGgU= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.7.4/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod 
h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-redis/redis/v8 v8.4.2/go.mod h1:A1tbYoHSa1fXwN+//ljcCYYJeLmVrwL9hbQN45Jdy0M= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-webauthn/webauthn v0.9.4 h1:YxvHSqgUyc5AK2pZbqkWWR55qKeDPhP8zLDr6lpIc2g= +github.com/go-webauthn/webauthn v0.9.4/go.mod h1:LqupCtzSef38FcxzaklmOn7AykGKhAhr9xlRbdbgnTw= +github.com/go-webauthn/x v0.1.5 h1:V2TCzDU2TGLd0kSZOXdrqDVV5JB9ILnKxA9S53CSBw0= +github.com/go-webauthn/x v0.1.5/go.mod h1:qbzWwcFcv4rTwtCLOZd+icnr6B7oSsAGZJqlt8cukqY= +github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= +github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-migrate/migrate/v4 v4.12.2 h1:QI43Tlouiwpp2dK5Y767OouX0snJNRP/NubsVaArzDU= +github.com/golang-migrate/migrate/v4 v4.12.2/go.mod h1:HQ1DaC8uLHkg4afY8ZQ8D/P5SG+YW9X5INZBVvm+d2k= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.11.0/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp 
v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= +github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= +github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= +github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod 
h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.3.2/go.mod h1:LvCquS3HbBKwgl7KbX9KyqEIumJAbm1UMcTvGaIf3bM= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod 
h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc= +github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= +github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/kong/go-srp v0.0.0-20191210190804-cde1efa3c083 h1:Y7nibF/3Ivmk+S4Q+KzVv98lFlSdrBhYzG44d5il85E= +github.com/kong/go-srp v0.0.0-20191210190804-cde1efa3c083/go.mod h1:Zde5RRLiH8/2zEXQDHX5W0dOOTxkemzrXMhHVfxTtTA= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt 
v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.8.0 h1:9xohqzkUwzR4Ga4ivdTcawVS89YSDVxXMa3xJX3cGzg= +github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lithammer/shortuuid/v3 v3.0.4 h1:uj4xhotfY92Y1Oa6n6HUiFn87CdoEHYUlTy0+IgbLrs= +github.com/lithammer/shortuuid/v3 v3.0.4/go.mod h1:RviRjexKqIzx/7r1peoAITm6m7gnif/h+0zmolKJjzw= +github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls= +github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/markbates/pkger v0.15.1/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.6 h1:dNPt6NO46WmLVt2DLNpwczCmdV5boIZ6g/tlDrlRUbg= +github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= 
+github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= +github.com/neo4j/neo4j-go-driver v1.8.1-0.20200803113522-b626aa943eba/go.mod h1:ncO5VaFWh0Nrt+4KT4mOZboaczBZcLuHrG+/sUeP8gI= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= +github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= +github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= +github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= +github.com/pelletier/go-toml v1.9.3 
h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ= +github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/browser v0.0.0-20180916011732-0a3d74bf9ce4/go.mod h1:4OwLy04Bl9Ef3GJJCoec+30X3LQs/0/m4HFRt/2LUSA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/pquerna/otp v1.3.0 h1:oJV/SkzR33anKXwQU3Of42rL4wbrffP4uvUf1SvS5Xs= +github.com/pquerna/otp v1.3.0/go.mod h1:dkJfzwRKNiegxyNb54X/3fLwhCynbMspSyWKnvi1AEg= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.1 h1:+4eQaD7vAZ6DsfsxB15hbE0odUjGI5ARs9yskGu1v4s= +github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 
h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/remyoudompheng/bigfft v0.0.0-20190728182440-6a916e37a237/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/snowflakedb/glog v0.0.0-20180824191149-f5055e6f21ce/go.mod h1:EB/w24pR5VKI60ecFnKqXzxX3dOorz1rnVicQTQrGM0= +github.com/snowflakedb/gosnowflake v1.3.5/go.mod h1:13Ky+lxzIm3VqNDZJdyvu9MCGy+WgRdYFdXp96UcLZU= +github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng= +github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44= +github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx 
v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stripe/stripe-go/v72 v72.37.0 h1:y/PW0SeIk17S1uq6tQ0RdyeizG1anZlvowMZ4AQ17YY= +github.com/stripe/stripe-go/v72 v72.37.0/go.mod h1:QwqJQtduHubZht9mek5sds9CtQcKFdsykV9ZepRWwo0= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ua-parser/uap-go v0.0.0-20211112212520-00c877edfe0f h1:A+MmlgpvrHLeUP8dkBVn4Pnf5Bp5Yk2OALm7SEJLLE8= +github.com/ua-parser/uap-go v0.0.0-20211112212520-00c877edfe0f/go.mod h1:OBcG9bn7sHtXgarhUEb3OfCnNsgtGnkVf41ilSZ3K3E= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/ulule/limiter/v3 v3.8.0 h1:rq76QxDIq5s/rvXc/A6HRHuGmehi/JE18qK3FaRUxKg= +github.com/ulule/limiter/v3 v3.8.0/go.mod h1:TpV4HWgOM7M43mrkE7MU1S62/XtuoZ/C9PL+ExxeTK4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.17.0/go.mod h1:jjraHZVbKOXftJfsOYoAjaeygpj5hr8ermTRJNroD7A= +github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVThkpGiXrs= +github.com/xdg/scram 
v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +github.com/zsais/go-gin-prometheus v0.1.0 h1:bkLv1XCdzqVgQ36ScgRi09MA2UC1t3tAB6nsfErsGO4= +github.com/zsais/go-gin-prometheus v0.1.0/go.mod h1:Slirjzuz8uM8Cw0jmPNqbneoqcUtY2GGjn2bEd4NRLY= +gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= +go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/otel v0.14.0/go.mod h1:vH5xEuwy7Rts0GNtsCW3HYQoZDY+OmBJ6t1bFGGlxgw= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= 
+golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= 
+golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net 
v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201016165138-7b1cca2348c0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 
v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= +golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210608053332-aa57babbf139/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= 
+golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod 
h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod 
h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200720141249-1244ee217b7e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210607140030-00d4fb20b1ae/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= +google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod 
h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.56.3 h1:8I4C0Yq1EjstUzUJzpcRVbuYA2mODtEmpWiQoN/b2nc= +google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= 
+gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= +gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +modernc.org/b v1.0.0/go.mod h1:uZWcZfRj1BpYzfN9JTerzlNUnnPsV9O2ZA8JsRcubNg= +modernc.org/db v1.0.0/go.mod h1:kYD/cO29L/29RM0hXYl4i3+Q5VojL31kTUVpVJDw0s8= +modernc.org/file v1.0.0/go.mod h1:uqEokAEn1u6e+J45e54dsEA/pw4o7zLrA2GwyntZzjw= +modernc.org/fileutil v1.0.0/go.mod h1:JHsWpkrk/CnVV1H/eGlFf85BEpfkrp56ro8nojIq9Q8= +modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= +modernc.org/internal v1.0.0/go.mod h1:VUD/+JAkhCpvkUitlEOnhpVxCgsBI90oTzSCRcqQVSM= +modernc.org/lldb v1.0.0/go.mod h1:jcRvJGWfCGodDZz8BPwiKMJxGJngQ/5DrRapkQnLob8= 
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/ql v1.0.0/go.mod h1:xGVyrLIatPcO2C1JvI/Co8c0sr6y91HKFNy4pt9JXEY=
+modernc.org/sortutil v1.1.0/go.mod h1:ZyL98OQHJgH9IEfN71VsamvJgrtRX9Dj2gX+vH86L1k=
+modernc.org/strutil v1.1.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/zappy v1.0.0/go.mod h1:hHe+oGahLVII/aTTyWK/b53VDHMAGCBYYeZ9sn83HC4=
+rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
+rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
+rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/server/mail-templates/account_deleted.html b/server/mail-templates/account_deleted.html
new file mode 100644
index 000000000..9c0cc6669
--- /dev/null
+++ b/server/mail-templates/account_deleted.html
@@ -0,0 +1,296 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Hey!
+As requested by you, we've deleted your ente account and scheduled your uploaded data for deletion.
+If you accidentally deleted your account, please contact our support immediately to try and recover your uploaded data before the next scheduled deletion happens.
+Thank you for checking out ente, we hope that you will give us another opportunity in the future!
diff --git a/server/mail-templates/account_deleted_active_sub.html b/server/mail-templates/account_deleted_active_sub.html
new file mode 100644
index 000000000..951ed442e
--- /dev/null
+++ b/server/mail-templates/account_deleted_active_sub.html
@@ -0,0 +1,296 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Hey!
+As requested by you, we've deleted your ente account and scheduled your uploaded data for deletion. If you have an App Store subscription for ente, please remember to cancel it too.
+If you accidentally deleted your account, please contact our support immediately to try and recover your uploaded data before the next scheduled deletion happens.
+Thank you for checking out ente, we hope that you will give us another opportunity in the future!
diff --git a/server/mail-templates/email_changed.html b/server/mail-templates/email_changed.html
new file mode 100644
index 000000000..5343d6bb6
--- /dev/null
+++ b/server/mail-templates/email_changed.html
@@ -0,0 +1,235 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Hey,
+This is to alert you that your email address has been updated to {{.NewEmail}}.
+Please respond if you need any assistance.
+- team@ente.io
diff --git a/server/mail-templates/family_accepted.html b/server/mail-templates/family_accepted.html
new file mode 100644
index 000000000..6153e9c89
--- /dev/null
+++ b/server/mail-templates/family_accepted.html
@@ -0,0 +1,301 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Invite accepted
+Hey!
+{{.MemberEmailID}} has joined your family on ente!
+Your storage space will now be shared with them.
+Please check the ente app to manage your family.
+If you need support, please respond to this mail
diff --git a/server/mail-templates/family_invited.html b/server/mail-templates/family_invited.html
new file mode 100644
index 000000000..ad1fb4da9
--- /dev/null
+++ b/server/mail-templates/family_invited.html
@@ -0,0 +1,385 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Invite to join family
+Hey!
+{{.AdminEmailID}} has invited you to be a part of their family on ente!
+Please click the button below to upgrade your storage space.
+Accept Invite
+If the button is not clickable, please paste the following link into your browser
+{{.FamilyInviteLink}}
+If you need support, please respond to this mail
diff --git a/server/mail-templates/family_left.html b/server/mail-templates/family_left.html
new file mode 100644
index 000000000..c2056a1a4
--- /dev/null
+++ b/server/mail-templates/family_left.html
@@ -0,0 +1,302 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Invite to join family
+Hey!
+{{.MemberEmailID}} has left your family on ente!
+Your storage space will no longer be shared with them.
+Please check the ente app to manage your family.
+If you need support, please respond to this mail
diff --git a/server/mail-templates/family_removed.html b/server/mail-templates/family_removed.html
new file mode 100644
index 000000000..278023ad4
--- /dev/null
+++ b/server/mail-templates/family_removed.html
@@ -0,0 +1,300 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Invite to join family
+Hey!
+You have been removed from {{.AdminEmailID}}’s family on ente.
+Please upgrade your subscription from the app to continue using ente.
+If you need support, please respond to this mail
diff --git a/server/mail-templates/files_collected.html b/server/mail-templates/files_collected.html
new file mode 100644
index 000000000..6f48f323f
--- /dev/null
+++ b/server/mail-templates/files_collected.html
@@ -0,0 +1,299 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+💝
+Hey there!
+Someone has added photos to your album.
+Please open your ente app to view them.
+About  Blog  Community
diff --git a/server/mail-templates/mobile_app_first_upload.html b/server/mail-templates/mobile_app_first_upload.html
new file mode 100644
index 000000000..53d7cfa86
--- /dev/null
+++ b/server/mail-templates/mobile_app_first_upload.html
@@ -0,0 +1,347 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Congratulations on preserving your first memory with ente!
+Did you know that we will be keeping 3 copies of this memory, at 3 different locations so that they are as safe as they can be? One of these copies will in fact be preserved in an underground fallout shelter!
+While we work our magic, you can go ahead share your memories with your loved ones. If they aren't on ente yet, you can share links.
+That's not all, if you wish to import more of your photos, we have an awesome desktop app waiting for you @ ente.io/download/desktop.
+Now as you check out the product, if there's anything you need help with, just write back and we'll be there for you!
+- team@ente
diff --git a/server/mail-templates/on_hold.html b/server/mail-templates/on_hold.html
new file mode 100644
index 000000000..612570aec
--- /dev/null
+++ b/server/mail-templates/on_hold.html
@@ -0,0 +1,285 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Hey,
+{{.PaymentProvider}} has informed us that they were unable to renew your ente subscription. Please update your payment method within {{.PaymentProvider}} so that your subscription can be renewed.
+If we don't get a payment confirmation from {{.PaymentProvider}} within the next 30 days, our systems may remove your account and all associated data with it.
+If you need support, please reply to this email, we're quick to respond!
diff --git a/server/mail-templates/ott_auth.html b/server/mail-templates/ott_auth.html
new file mode 100644
index 000000000..19b9826d6
--- /dev/null
+++ b/server/mail-templates/ott_auth.html
@@ -0,0 +1,215 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Paste this code into the app to verify your email address
+{{.VerificationCode}}
+Please respond to this email if you are facing any issues
diff --git a/server/mail-templates/ott_change_email.html b/server/mail-templates/ott_change_email.html
new file mode 100644
index 000000000..d1964c851
--- /dev/null
+++ b/server/mail-templates/ott_change_email.html
@@ -0,0 +1,215 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Enter the following code to update your email address
+{{.VerificationCode}}
+Please respond to this email if you are facing any issues
diff --git a/server/mail-templates/ott_photos.html b/server/mail-templates/ott_photos.html
new file mode 100644
index 000000000..5774eb4f5
--- /dev/null
+++ b/server/mail-templates/ott_photos.html
@@ -0,0 +1,215 @@
+[HTML email template; table layout markup omitted, recoverable text content:]
+Paste this code into the app to verify your email address
+{{.VerificationCode}}
+Please respond to this email if you are facing any issues
diff --git a/server/mail-templates/report_alert.html b/server/mail-templates/report_alert.html
new file mode 100644
index 000000000..8294458ed
--- /dev/null
+++ b/server/mail-templates/report_alert.html
@@ -0,0 +1,359 @@
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Hey, +
+
+
+
+
+ This + is to notify + that someone you + shared your + album with + has reported + its contents for + abusing our terms + of + service. +
+
+
+
+
+ Here + are more details + regarding this + report: +
    +
  • Album + Link: + {{.AlbumLink}} +
  • Reason: + {{.Reason}} +
  • Comments: + {{.Comments}} +
+
+
+
+ If + there's anything + that you need + help with, + please respond + to this + email. +
+
+
+
+ +
+ Thank + you! + +
+
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/server/mail-templates/report_limit_exceeded_alert.html b/server/mail-templates/report_limit_exceeded_alert.html new file mode 100644 index 000000000..6d9a31d6b --- /dev/null +++ b/server/mail-templates/report_limit_exceeded_alert.html @@ -0,0 +1,353 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Hey, +
+
+
+
+
+ We + have received + too many abuse + reports against + an + album you've + shared via + ente. +
+
+
+
+
+ + Out of an abundance + of caution, we + have temporarily + disabled the + publicly + accessible link + that we were + serving. + +
+
+
+
+
+ In + the meantime, + if + you need + support, + please respond + to this + email. +
+
+
+
+ +
+ Thank + you! + +
+
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/server/mail-templates/storage_limit_exceeded.html b/server/mail-templates/storage_limit_exceeded.html new file mode 100644 index 000000000..49270a864 --- /dev/null +++ b/server/mail-templates/storage_limit_exceeded.html @@ -0,0 +1,253 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Hey, +
+
+
+
+
+ This is to let you know that you have reached your storage limit. The files you've uploaded so far will remain accessible, but no new files will be backed up until you upgrade your subscription. +
+
+
+
+
+ If you're looking for a safe space to preserve more of your memories, please do upgrade; we would be delighted to serve you! +
+
+
+
+
+ In + case you have + any questions or + feedback, just + write back; we'd + be happy to + help. +
+
+
+
+
+ That's + all. We hope you have a memorable day ahead! +
+
+
+
+
+ - + team@ente.io +
+
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + \ No newline at end of file diff --git a/server/mail-templates/subscription_ended.html b/server/mail-templates/subscription_ended.html new file mode 100644 index 000000000..4a158d767 --- /dev/null +++ b/server/mail-templates/subscription_ended.html @@ -0,0 +1,264 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Your subscription to + ente Photos has ended. Thank you for trying out ente. +
+
+
+
+ If you still have data stored in ente, we encourage you to follow the steps outlined here to export your data: ente.io/faq/migration/out-of-ente. +
+
+
+
+ If there's anything we could have done better, please let us know by replying to + this email. Your feedback will help us do better by the next time you subscribe! +
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + diff --git a/server/mail-templates/subscription_upgraded.html b/server/mail-templates/subscription_upgraded.html new file mode 100644 index 000000000..241bc3915 --- /dev/null +++ b/server/mail-templates/subscription_upgraded.html @@ -0,0 +1,322 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Hello! +
+
+
+
+
+ We want to take this opportunity to thank you for subscribing to a paid plan. +
+
+
+
+
+ + In case you did not know, you can share links to your albums with your loved ones who aren't on ente. You can even let them add photos via these links. All this, end-to-end encrypted, in original quality. + +
+
+
+
+
+ + You can also use our family plans to share your storage with them, at no extra cost. + +
+
+
+
+
+ + If at any point you need support, or have feedback to share, please do write to us. We want ente to work well for you. + +
+
+
+
+
+ + Here's to a beautiful journey together 🥂 + +
+
+
+
+
+ + - team@ente + +
+
+
+
+
+
+
+
+
+
+ +
+ About + Blog + Community + Shop +
+
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + diff --git a/server/mail-templates/successful_referral.html b/server/mail-templates/successful_referral.html new file mode 100644 index 000000000..60812e0fd --- /dev/null +++ b/server/mail-templates/successful_referral.html @@ -0,0 +1,298 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Congratulations! +
+
+
+
+
+ One of the customers you referred has upgraded to a paid plan, and as a thank you, we have credited 10 GB to your account. +
+
+
+
+
+ + Thank you for spreading the word! + +
+
+
+
+
+ + - team@ente + +
+
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + diff --git a/server/mail-templates/web_app_first_upload.html b/server/mail-templates/web_app_first_upload.html new file mode 100644 index 000000000..835568dce --- /dev/null +++ b/server/mail-templates/web_app_first_upload.html @@ -0,0 +1,463 @@ + + + + + + + + + + + + + + + + +
+
+ + + + + + +
+ + + + + + +
+ + + + + + +
+ + + + + + + +
+ + + + + + + + + + + + + +
+
+
+ Congratulations + on preserving + your first + memory with + ente! +
+
+
+
+
+ Did you know that we will be + keeping 3 copies of this memory, at 3 different locations so that they are + as safe as they can be? One of these copies will in fact be preserved in + an underground fallout shelter! +
+
+
+
+
+ While we work our magic, + you can go ahead and share your memories with your loved ones. + If they aren't on ente yet, + you can share links. +
+
+
+
+
+ That's not all: + we have beautiful mobile apps (linked below) that back up + the photos you capture, automatically in the background. + +
+
+
+
+
+ Now, as you check out the product, + if there's anything you need help with, just write back and + we'll be there for you! +
+
+
+
+
+ - + team@ente +
+
+
+
+ +
+
+
+ + + +
+
+ +
+ + + + + + + + +
+ + + + + +
+ + Download on the Play Store + +
+ +
+ + +
+ +
+
+ + +
+
+ +
+ + + + + + + + + +
+ + + + + +
+ + Download on the App Store + +
+ +
+ + +
+ +
+
+ + +
+
+
+
+
+
+
+
+
+
+ + +
+
+
+
+
+
+
+
+ +
+
+
+
+
+ + + + diff --git a/server/migrations/10_update_temp_object_keys.down.sql b/server/migrations/10_update_temp_object_keys.down.sql new file mode 100644 index 000000000..f00d91ced --- /dev/null +++ b/server/migrations/10_update_temp_object_keys.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE temp_objects + DROP COLUMN is_multipart, + DROP COLUMN upload_id; + +ALTER TABLE temp_objects + RENAME TO temp_object_keys; diff --git a/server/migrations/10_update_temp_object_keys.up.sql b/server/migrations/10_update_temp_object_keys.up.sql new file mode 100644 index 000000000..47925dcda --- /dev/null +++ b/server/migrations/10_update_temp_object_keys.up.sql @@ -0,0 +1,13 @@ +ALTER TABLE temp_object_keys + RENAME TO temp_objects; + +ALTER TABLE temp_objects + ADD COLUMN is_multipart BOOLEAN, + ADD COLUMN upload_id TEXT; + +UPDATE temp_objects SET is_multipart ='f'; + +ALTER TABLE temp_objects + ALTER COLUMN is_multipart SET NOT NULL, + ALTER COLUMN is_multipart SET DEFAULT FALSE; + \ No newline at end of file diff --git a/server/migrations/11_remove_kek_hash_constraint.down.sql b/server/migrations/11_remove_kek_hash_constraint.down.sql new file mode 100644 index 000000000..dbece3abf --- /dev/null +++ b/server/migrations/11_remove_kek_hash_constraint.down.sql @@ -0,0 +1 @@ +ALTER TABLE key_attributes ALTER COLUMN kek_hash_bytes SET NOT NULL; diff --git a/server/migrations/11_remove_kek_hash_constraint.up.sql b/server/migrations/11_remove_kek_hash_constraint.up.sql new file mode 100644 index 000000000..e8cc7fd0f --- /dev/null +++ b/server/migrations/11_remove_kek_hash_constraint.up.sql @@ -0,0 +1 @@ +ALTER TABLE key_attributes ALTER COLUMN kek_hash_bytes DROP NOT NULL; diff --git a/server/migrations/12_add_hash_limits.down.sql b/server/migrations/12_add_hash_limits.down.sql new file mode 100644 index 000000000..d09135994 --- /dev/null +++ b/server/migrations/12_add_hash_limits.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE key_attributes DROP COLUMN mem_limit; + +ALTER TABLE key_attributes DROP COLUMN ops_limit; diff --git a/server/migrations/12_add_hash_limits.up.sql b/server/migrations/12_add_hash_limits.up.sql new file mode 100644 index 000000000..6c9ad3e2c --- /dev/null +++ b/server/migrations/12_add_hash_limits.up.sql @@ -0,0 +1,7 @@ +ALTER TABLE key_attributes ADD COLUMN mem_limit INTEGER DEFAULT 67108864; + +UPDATE key_attributes SET mem_limit = 67108864; -- crypto_pwhash_MEMLIMIT_INTERACTIVE + +ALTER TABLE key_attributes ADD COLUMN ops_limit INTEGER DEFAULT 2; + +UPDATE key_attributes SET ops_limit = 2; -- crypto_pwhash_OPSLIMIT_INTERACTIVE diff --git a/server/migrations/13_add_recovery_key.down.sql b/server/migrations/13_add_recovery_key.down.sql new file mode 100644 index 000000000..d347aa307 --- /dev/null +++ b/server/migrations/13_add_recovery_key.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE key_attributes + DROP COLUMN master_key_encrypted_with_recovery_key, + DROP COLUMN master_key_decryption_nonce, + DROP COLUMN recovery_key_encrypted_with_master_key, + DROP COLUMN recovery_key_decryption_nonce; diff --git a/server/migrations/13_add_recovery_key.up.sql b/server/migrations/13_add_recovery_key.up.sql new file mode 100644 index 000000000..6e6d222b2 --- /dev/null +++ b/server/migrations/13_add_recovery_key.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE key_attributes + ADD COLUMN master_key_encrypted_with_recovery_key TEXT, + ADD COLUMN master_key_decryption_nonce TEXT, + ADD COLUMN recovery_key_encrypted_with_master_key TEXT, + ADD COLUMN recovery_key_decryption_nonce TEXT; diff --git 
a/server/migrations/14_add_user_agent.down.sql b/server/migrations/14_add_user_agent.down.sql new file mode 100644 index 000000000..646579177 --- /dev/null +++ b/server/migrations/14_add_user_agent.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE tokens + DROP COLUMN ip, + DROP COLUMN user_agent; diff --git a/server/migrations/14_add_user_agent.up.sql b/server/migrations/14_add_user_agent.up.sql new file mode 100644 index 000000000..de48cb645 --- /dev/null +++ b/server/migrations/14_add_user_agent.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tokens + ADD COLUMN ip TEXT, + ADD COLUMN user_agent TEXT; diff --git a/server/migrations/15_update_subscriptions.down.sql b/server/migrations/15_update_subscriptions.down.sql new file mode 100644 index 000000000..e7d4afe69 --- /dev/null +++ b/server/migrations/15_update_subscriptions.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE subscriptions + DROP COLUMN attributes, + DROP CONSTRAINT subscription_user_id_unique_constraint_index, + ALTER COLUMN latest_verification_data SET NOT NULL; + diff --git a/server/migrations/15_update_subscriptions.up.sql b/server/migrations/15_update_subscriptions.up.sql new file mode 100644 index 000000000..6ae8f5ae2 --- /dev/null +++ b/server/migrations/15_update_subscriptions.up.sql @@ -0,0 +1,13 @@ +ALTER TABLE subscriptions + ADD CONSTRAINT subscription_user_id_unique_constraint_index UNIQUE (user_id), + ADD COLUMN attributes JSONB; + +UPDATE subscriptions + SET attributes = + CAST('{"latest_verification_data":"' || latest_verification_data ||'"}' + AS json); + +ALTER TABLE subscriptions + ALTER COLUMN attributes SET NOT NULL, + ALTER COLUMN latest_verification_data DROP NOT NULL; + diff --git a/server/migrations/16_remove_deleted_file_objects.down.sql b/server/migrations/16_remove_deleted_file_objects.down.sql new file mode 100644 index 000000000..b6fcbaa5d --- /dev/null +++ b/server/migrations/16_remove_deleted_file_objects.down.sql @@ -0,0 +1 @@ +-- Just for sanity diff --git a/server/migrations/16_remove_deleted_file_objects.up.sql b/server/migrations/16_remove_deleted_file_objects.up.sql new file mode 100644 index 000000000..4d282b4d2 --- /dev/null +++ b/server/migrations/16_remove_deleted_file_objects.up.sql @@ -0,0 +1,11 @@ +DELETE FROM file_object_keys +WHERE file_id NOT IN ( + SELECT DISTINCT file_id FROM collection_files + WHERE is_deleted=false + ); + +DELETE FROM thumbnail_object_keys +WHERE file_id NOT IN ( + SELECT DISTINCT file_id FROM collection_files + WHERE is_deleted=false +); diff --git a/server/migrations/17_add_notification_history_table.down.sql b/server/migrations/17_add_notification_history_table.down.sql new file mode 100644 index 000000000..300aef6c8 --- /dev/null +++ b/server/migrations/17_add_notification_history_table.down.sql @@ -0,0 +1 @@ +DROP TABLE notification_history; diff --git a/server/migrations/17_add_notification_history_table.up.sql b/server/migrations/17_add_notification_history_table.up.sql new file mode 100644 index 000000000..f3194905d --- /dev/null +++ b/server/migrations/17_add_notification_history_table.up.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS notification_history ( + user_id INTEGER NOT NULL, + template_id TEXT NOT NULL, + sent_time BIGINT NOT NULL, + + CONSTRAINT fk_notification_history_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); diff --git a/server/migrations/18_update_size_column.down.sql b/server/migrations/18_update_size_column.down.sql new file mode 100644 index 000000000..c9d6884af --- /dev/null +++ 
b/server/migrations/18_update_size_column.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE file_object_keys + ALTER COLUMN size TYPE INTEGER; + +ALTER TABLE thumbnail_object_keys + ALTER COLUMN size TYPE INTEGER; diff --git a/server/migrations/18_update_size_column.up.sql b/server/migrations/18_update_size_column.up.sql new file mode 100644 index 000000000..525f9e28e --- /dev/null +++ b/server/migrations/18_update_size_column.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE file_object_keys + ALTER COLUMN size TYPE BIGINT; + +ALTER TABLE thumbnail_object_keys + ALTER COLUMN size TYPE BIGINT; diff --git a/server/migrations/19_add_encrypted_email_columns.down.sql b/server/migrations/19_add_encrypted_email_columns.down.sql new file mode 100644 index 000000000..c1da7d0a5 --- /dev/null +++ b/server/migrations/19_add_encrypted_email_columns.down.sql @@ -0,0 +1,14 @@ +ALTER TABLE users + DROP COLUMN encrypted_email, + DROP COLUMN email_decryption_nonce, + DROP COLUMN email_hash; + +DROP INDEX users_email_hash_index; + +ALTER TABLE users ALTER COLUMN email SET NOT NULL; + +ALTER TABLE otts DROP COLUMN email_hash; + +ALTER TABLE otts ALTER COLUMN email SET NOT NULL; + +DROP INDEX otts_email_hash_index; diff --git a/server/migrations/19_add_encrypted_email_columns.up.sql b/server/migrations/19_add_encrypted_email_columns.up.sql new file mode 100644 index 000000000..98d0914c7 --- /dev/null +++ b/server/migrations/19_add_encrypted_email_columns.up.sql @@ -0,0 +1,15 @@ +ALTER TABLE users + ADD COLUMN encrypted_email BYTEA, + ADD COLUMN email_decryption_nonce BYTEA, + ADD COLUMN email_hash TEXT UNIQUE; + +CREATE INDEX IF NOT EXISTS users_email_hash_index ON users(email_hash); + +ALTER TABLE users ALTER COLUMN email DROP NOT NULL; + +ALTER TABLE otts + ADD COLUMN email_hash TEXT; + +ALTER TABLE otts ALTER COLUMN email DROP NOT NULL; + +CREATE INDEX IF NOT EXISTS otts_email_hash_index ON otts(email_hash); diff --git a/server/migrations/1_create_tables.down.sql b/server/migrations/1_create_tables.down.sql new file mode 100644 index 000000000..0f5137d10 --- /dev/null +++ b/server/migrations/1_create_tables.down.sql @@ -0,0 +1,31 @@ +DROP TABLE files; + +DROP TABLE file_object_keys; + +DROP TABLE thumbnail_object_keys; + +DROP TABLE temp_object_keys; + +DROP TABLE users; + +DROP TABLE key_attributes; + +DROP TABLE otts; + +DROP TABLE tokens; + +DROP INDEX users_email_index; + +DROP INDEX files_owner_id_index; + +DROP INDEX files_updation_time_index; + +DROP INDEX tokens_user_id_index; + +DROP INDEX collections_owner_id_index; + +DROP INDEX collection_shares_to_user_id_index; + +DROP INDEX collection_files_collection_id_index; + +DROP INDEX collections_favorites_constraint_index; diff --git a/server/migrations/1_create_tables.up.sql b/server/migrations/1_create_tables.up.sql new file mode 100644 index 000000000..85ba9c303 --- /dev/null +++ b/server/migrations/1_create_tables.up.sql @@ -0,0 +1,155 @@ +CREATE TABLE IF NOT EXISTS users ( + user_id SERIAL PRIMARY KEY, + email TEXT UNIQUE NOT NULL, + name TEXT, + creation_time BIGINT NOT NULL +); + +CREATE TABLE IF NOT EXISTS files ( + file_id BIGSERIAL PRIMARY KEY, + owner_id INTEGER NOT NULL, + file_decryption_header TEXT NOT NULL, + thumbnail_decryption_header TEXT NOT NULL, + metadata_decryption_header TEXT NOT NULL, + encrypted_metadata TEXT NOT NULL, + updation_time BIGINT NOT NULL, + CONSTRAINT fk_files_owner_id + FOREIGN KEY(owner_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS file_object_keys ( + file_id BIGINT PRIMARY KEY, + object_key 
TEXT UNIQUE NOT NULL, + size INTEGER NOT NULL, + CONSTRAINT fk_file_object_keys_file_id + FOREIGN KEY(file_id) + REFERENCES files(file_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS thumbnail_object_keys ( + file_id BIGINT PRIMARY KEY, + object_key TEXT UNIQUE NOT NULL, + size INTEGER NOT NULL, + CONSTRAINT fk_thumbnail_object_keys_file_id + FOREIGN KEY(file_id) + REFERENCES files(file_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS temp_object_keys ( + object_key TEXT PRIMARY KEY NOT NULL, + expiration_time BIGINT NOT NULL +); + +CREATE TABLE IF NOT EXISTS otts ( + user_id INTEGER NOT NULL, + ott TEXT UNIQUE NOT NULL, + creation_time BIGINT NOT NULL, + expiration_time BIGINT NOT NULL, + CONSTRAINT fk_otts_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS tokens ( + user_id INTEGER NOT NULL, + token TEXT UNIQUE NOT NULL, + creation_time BIGINT NOT NULL, + CONSTRAINT fk_tokens_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS key_attributes ( + user_id INTEGER PRIMARY KEY, + kek_salt TEXT NOT NULL, + kek_hash_bytes BYTEA NOT NULL, + encrypted_key TEXT NOT NULL, + key_decryption_nonce TEXT NOT NULL, + public_key TEXT NOT NULL, + encrypted_secret_key TEXT NOT NULL, + secret_key_decryption_nonce TEXT NOT NULL, + CONSTRAINT fk_key_attributes_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS collections ( + collection_id SERIAL PRIMARY KEY, + owner_id INTEGER NOT NULL, + encrypted_key TEXT NOT NULL, + key_decryption_nonce TEXT NOT NULL, + name TEXT NOT NULL, + type TEXT NOT NULL, + attributes JSONB NOT NULL, + updation_time BIGINT NOT NULL, + is_deleted BOOLEAN DEFAULT FALSE, + CONSTRAINT fk_collections_owner_id + FOREIGN KEY(owner_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS collection_shares ( + collection_id INTEGER NOT NULL, + from_user_id INTEGER NOT NULL, + to_user_id INTEGER NOT NULL, + encrypted_key TEXT NOT NULL, + updation_time BIGINT NOT NULL, + is_deleted BOOLEAN DEFAULT FALSE, + UNIQUE(collection_id, from_user_id, to_user_id), + CONSTRAINT fk_collection_shares_collection_id + FOREIGN KEY(collection_id) + REFERENCES collections(collection_id) + ON DELETE CASCADE, + CONSTRAINT fk_collection_shares_from_user_id + FOREIGN KEY(from_user_id) + REFERENCES users(user_id) + ON DELETE CASCADE, + CONSTRAINT fk_collection_shares_to_user_id + FOREIGN KEY(to_user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS collection_files ( + file_id BIGINT NOT NULL, + collection_id INTEGER NOT NULL, + encrypted_key TEXT NOT NULL, + key_decryption_nonce TEXT NOT NULL, + is_deleted BOOLEAN DEFAULT FALSE, + updation_time BIGINT NOT NULL, + CONSTRAINT unique_collection_files_cid_fid UNIQUE(collection_id, file_id), + CONSTRAINT fk_collection_files_collection_id + FOREIGN KEY(collection_id) + REFERENCES collections(collection_id) + ON DELETE CASCADE, + CONSTRAINT fk_collection_files_file_id + FOREIGN KEY(file_id) + REFERENCES files(file_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS users_email_index ON users(email); + +CREATE INDEX IF NOT EXISTS files_owner_id_index ON files (owner_id); + +CREATE INDEX IF NOT EXISTS files_updation_time_index ON files (updation_time); + +CREATE INDEX IF NOT EXISTS otts_user_id_index ON otts (user_id); + +CREATE INDEX IF NOT EXISTS tokens_user_id_index ON tokens (user_id); + +CREATE 
INDEX IF NOT EXISTS collections_owner_id_index ON collections (owner_id); + +CREATE INDEX IF NOT EXISTS collection_shares_to_user_id_index ON collection_shares (to_user_id); + +CREATE INDEX IF NOT EXISTS collection_files_collection_id_index ON collection_files (collection_id); + +CREATE UNIQUE INDEX IF NOT EXISTS collections_favorites_constraint_index ON collections (owner_id) WHERE (type = 'favorites'); diff --git a/server/migrations/20_recompute_usage.down.sql b/server/migrations/20_recompute_usage.down.sql new file mode 100644 index 000000000..15777c72d --- /dev/null +++ b/server/migrations/20_recompute_usage.down.sql @@ -0,0 +1 @@ +-- just for sanity diff --git a/server/migrations/20_recompute_usage.up.sql b/server/migrations/20_recompute_usage.up.sql new file mode 100644 index 000000000..6cf999bee --- /dev/null +++ b/server/migrations/20_recompute_usage.up.sql @@ -0,0 +1,19 @@ +INSERT INTO usage(user_id,storage_consumed) + SELECT user_id, COALESCE(total_file_size+total_thumbnail_size,0) as storage_consumed FROM + users, + LATERAL ( + SELECT SUM(size) AS total_thumbnail_size + FROM thumbnail_object_keys + LEFT JOIN files ON files.file_id = thumbnail_object_keys.file_id + WHERE + owner_id = users.user_id + ) query_1, + LATERAL ( + SELECT SUM(size) AS total_file_size + FROM file_object_keys + LEFT JOIN files ON files.file_id = file_object_keys.file_id + WHERE + owner_id = users.user_id + ) query_2 + ON CONFLICT (user_id) + DO UPDATE SET storage_consumed = EXCLUDED.storage_consumed; diff --git a/server/migrations/21_add_two_factor.down.sql b/server/migrations/21_add_two_factor.down.sql new file mode 100644 index 000000000..60cca17b8 --- /dev/null +++ b/server/migrations/21_add_two_factor.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE users DROP COLUMN is_two_factor_enabled; + +DROP TABLE two_factor; + +DROP TABLE temp_two_factor; + +DROP TABLE two_factor_sessions; + diff --git a/server/migrations/21_add_two_factor.up.sql b/server/migrations/21_add_two_factor.up.sql new file mode 100644 index 000000000..2af0967f2 --- /dev/null +++ b/server/migrations/21_add_two_factor.up.sql @@ -0,0 +1,43 @@ +ALTER TABLE users ADD COLUMN is_two_factor_enabled boolean; + +UPDATE users SET is_two_factor_enabled = 'f'; + +ALTER TABLE users ALTER COLUMN is_two_factor_enabled SET NOT NULL; +ALTER TABLE users ALTER COLUMN is_two_factor_enabled SET DEFAULT FALSE; + +CREATE TABLE IF NOT EXISTS two_factor( + user_id INTEGER NOT NULL UNIQUE, + two_factor_secret_hash TEXT UNIQUE, + encrypted_two_factor_secret BYTEA, + two_factor_secret_decryption_nonce BYTEA, + recovery_encrypted_two_factor_secret TEXT, + recovery_two_factor_secret_decryption_nonce TEXT, + CONSTRAINT fk_two_factor_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS temp_two_factor( + user_id INTEGER NOT NULL, + two_factor_secret_hash TEXT UNIQUE, + encrypted_two_factor_secret BYTEA, + two_factor_secret_decryption_nonce BYTEA, + creation_time BIGINT NOT NULL, + expiration_time BIGINT NOT NULL, + CONSTRAINT fk_two_factor_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS two_factor_sessions( + user_id INTEGER NOT NULL, + session_id TEXT UNIQUE NOT NULL, + creation_time BIGINT NOT NULL, + expiration_time BIGINT NOT NULL, + CONSTRAINT fk_sessions_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); diff --git a/server/migrations/22_add_location_tag_table.down.sql 
b/server/migrations/22_add_location_tag_table.down.sql new file mode 100644 index 000000000..096ded0d7 --- /dev/null +++ b/server/migrations/22_add_location_tag_table.down.sql @@ -0,0 +1,2 @@ +DROP TRIGGER IF EXISTS update_location_tag_updated_at ON location_tag; +DROP TABLE location_tag; diff --git a/server/migrations/22_add_location_tag_table.up.sql b/server/migrations/22_add_location_tag_table.up.sql new file mode 100644 index 000000000..4f1075698 --- /dev/null +++ b/server/migrations/22_add_location_tag_table.up.sql @@ -0,0 +1,38 @@ +CREATE OR REPLACE FUNCTION now_utc_micro_seconds() RETURNS BIGINT AS +$$ +SELECT CAST(extract(EPOCH from now() at time zone 'utc') * 1000000 as BIGINT) ; +$$ language sql; + +-- We can reuse this func to create triggers in other tables. +CREATE OR REPLACE FUNCTION trigger_updated_at_microseconds_column() + RETURNS TRIGGER AS +$$ +BEGIN + NEW.updated_at = now_utc_micro_seconds(); + RETURN NEW; +END; +$$ language 'plpgsql'; + +CREATE TABLE IF NOT EXISTS location_tag +( + id uuid PRIMARY KEY NOT NULL, + user_id INTEGER NOT NULL, + provider TEXT NOT NULL DEFAULT 'USER', + is_deleted BOOLEAN NOT NULL DEFAULT FALSE, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + encrypted_key TEXT NOT NULL, + key_decryption_nonce TEXT NOT NULL, + attributes JSONB NOT NULL, + CONSTRAINT fk_location_tag_user_id + FOREIGN KEY (user_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + +CREATE TRIGGER update_location_tag_updated_at + BEFORE UPDATE + ON location_tag + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/23_update_mem_limit_column.down.sql b/server/migrations/23_update_mem_limit_column.down.sql new file mode 100644 index 000000000..76ada3d09 --- /dev/null +++ b/server/migrations/23_update_mem_limit_column.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE key_attributes + ALTER COLUMN mem_limit TYPE INTEGER; diff --git a/server/migrations/23_update_mem_limit_column.up.sql b/server/migrations/23_update_mem_limit_column.up.sql new file mode 100644 index 000000000..5c7c76b3a --- /dev/null +++ b/server/migrations/23_update_mem_limit_column.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE key_attributes + ALTER COLUMN mem_limit TYPE BIGINT; diff --git a/server/migrations/24_bump_ids.down.sql b/server/migrations/24_bump_ids.down.sql new file mode 100644 index 000000000..e935d5de5 --- /dev/null +++ b/server/migrations/24_bump_ids.down.sql @@ -0,0 +1 @@ +-- # DO NOTHING diff --git a/server/migrations/24_bump_ids.up.sql b/server/migrations/24_bump_ids.up.sql new file mode 100644 index 000000000..918a9316c --- /dev/null +++ b/server/migrations/24_bump_ids.up.sql @@ -0,0 +1,3 @@ +ALTER SEQUENCE files_file_id_seq RESTART WITH 10000000; +ALTER SEQUENCE collections_collection_id_seq RESTART WITH 10000000; +ALTER SEQUENCE users_user_id_seq RESTART WITH 10000000; diff --git a/server/migrations/25_alter_ids_to_bigint.down.sql b/server/migrations/25_alter_ids_to_bigint.down.sql new file mode 100644 index 000000000..63a493410 --- /dev/null +++ b/server/migrations/25_alter_ids_to_bigint.down.sql @@ -0,0 +1 @@ +-- do nothing diff --git a/server/migrations/25_alter_ids_to_bigint.up.sql b/server/migrations/25_alter_ids_to_bigint.up.sql new file mode 100644 index 000000000..ac91cf457 --- /dev/null +++ b/server/migrations/25_alter_ids_to_bigint.up.sql @@ -0,0 +1,46 @@ +ALTER TABLE collection_files ALTER COLUMN collection_id TYPE BIGINT; + +ALTER TABLE collection_shares ALTER COLUMN 
collection_id TYPE BIGINT; +ALTER TABLE collection_shares ALTER COLUMN from_user_id TYPE BIGINT; +ALTER TABLE collection_shares ALTER COLUMN to_user_id TYPE BIGINT; + +ALTER TABLE collections ALTER COLUMN collection_id TYPE BIGINT; +ALTER TABLE collections ALTER COLUMN owner_id TYPE BIGINT; + +ALTER TABLE files ALTER COLUMN owner_id TYPE BIGINT; + +ALTER TABLE key_attributes ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE location_tag ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE notification_history ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE subscription_logs ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE subscriptions ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE temp_two_factor ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE tokens ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE two_factor ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE two_factor_sessions ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE usage ALTER COLUMN user_id TYPE BIGINT; + +ALTER TABLE users ALTER COLUMN user_id TYPE BIGINT; + +BEGIN; +ALTER TABLE collections ALTER collection_id DROP DEFAULT; +DROP SEQUENCE collections_collection_id_seq; +ALTER TABLE collections + ALTER collection_id ADD GENERATED ALWAYS AS IDENTITY (RESTART 1580559962386438); +COMMIT; + +BEGIN; +ALTER TABLE users ALTER user_id DROP DEFAULT; +DROP SEQUENCE users_user_id_seq; +ALTER TABLE users + ALTER user_id ADD GENERATED ALWAYS AS IDENTITY (RESTART 1580559962386438); +COMMIT; diff --git a/server/migrations/26_update_ott_and_twofactor.down.sql b/server/migrations/26_update_ott_and_twofactor.down.sql new file mode 100644 index 000000000..974a0395f --- /dev/null +++ b/server/migrations/26_update_ott_and_twofactor.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE otts + DROP COLUMN wrong_attempt; + + +ALTER TABLE two_factor_sessions + DROP COLUMN wrong_attempt; diff --git a/server/migrations/26_update_ott_and_twofactor.up.sql b/server/migrations/26_update_ott_and_twofactor.up.sql new file mode 100644 index 000000000..fccd97065 --- /dev/null +++ b/server/migrations/26_update_ott_and_twofactor.up.sql @@ -0,0 +1,19 @@ +ALTER TABLE otts + ADD COLUMN wrong_attempt INTEGER DEFAULT 0; + +ALTER TABLE two_factor_sessions + ADD COLUMN wrong_attempt INTEGER DEFAULT 0; + +BEGIN; +UPDATE otts set wrong_attempt = 0 where wrong_attempt is null; + +ALTER TABLE otts + ALTER COLUMN wrong_attempt SET NOT NULL; +COMMIT; + +BEGIN; +UPDATE two_factor_sessions set wrong_attempt = 0 where wrong_attempt is null; + +ALTER TABLE two_factor_sessions + ALTER COLUMN wrong_attempt SET NOT NULL; +COMMIT; diff --git a/server/migrations/27_consolidate_files_table.down.sql b/server/migrations/27_consolidate_files_table.down.sql new file mode 100644 index 000000000..b6812d20c --- /dev/null +++ b/server/migrations/27_consolidate_files_table.down.sql @@ -0,0 +1,3 @@ +DROP TRIGGER IF EXISTS update_object_keys_updated_at ON object_keys; +DROP TABLE IF EXISTS object_keys; +DROP TYPE OBJECT_TYPE; diff --git a/server/migrations/27_consolidate_files_table.up.sql b/server/migrations/27_consolidate_files_table.up.sql new file mode 100644 index 000000000..9cdb93791 --- /dev/null +++ b/server/migrations/27_consolidate_files_table.up.sql @@ -0,0 +1,34 @@ +CREATE TYPE OBJECT_TYPE as ENUM ('file', 'thumbnail'); + +CREATE TABLE IF NOT EXISTS object_keys +( + file_id BIGINT NOT NULL, + o_type OBJECT_TYPE NOT NULL, + object_key TEXT UNIQUE NOT NULL, + size bigint NOT NULL, + datacenters s3region[] NOT NULL, + is_deleted bool DEFAULT false, + created_at bigint DEFAULT now_utc_micro_seconds(), + 
updated_at bigint DEFAULT now_utc_micro_seconds(), + PRIMARY KEY (file_id, o_type), + CONSTRAINT fk_object_keys_file_id + FOREIGN KEY (file_id) + REFERENCES files (file_id) + ON DELETE CASCADE +); + +CREATE TRIGGER update_object_keys_updated_at + BEFORE UPDATE + ON object_keys + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + +-- copy data from existing tables to this new table. +BEGIN; +INSERT INTO object_keys(file_id, o_type, object_key, size, datacenters) +SELECT file_id, 'file', object_key,size, datacenters FROM file_object_keys; + +INSERT INTO object_keys(file_id, o_type, object_key, size, datacenters) +SELECT file_id, 'thumbnail', object_key,size, datacenters FROM thumbnail_object_keys; +COMMIT; diff --git a/server/migrations/28_queue_time_stamp.down.sql b/server/migrations/28_queue_time_stamp.down.sql new file mode 100644 index 000000000..d2674dc49 --- /dev/null +++ b/server/migrations/28_queue_time_stamp.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE queue + DROP COLUMN created_at, + DROP COLUMN updated_at, + DROP COLUMN is_deleted; +DROP INDEX IF EXISTS name_and_item_unique_index; +DROP INDEX IF EXISTS q_name_create_and_is_deleted_index; + +DROP TRIGGER IF EXISTS update_queue_updated_at ON queue; diff --git a/server/migrations/28_queue_time_stamp.up.sql b/server/migrations/28_queue_time_stamp.up.sql new file mode 100644 index 000000000..895374039 --- /dev/null +++ b/server/migrations/28_queue_time_stamp.up.sql @@ -0,0 +1,14 @@ +ALTER TABLE queue + ADD COLUMN created_at bigint DEFAULT now_utc_micro_seconds(), + ADD COLUMN updated_at bigint DEFAULT now_utc_micro_seconds(), + ADD COLUMN is_deleted bool DEFAULT false; + +CREATE UNIQUE INDEX IF NOT EXISTS name_and_item_unique_index ON queue (queue_name, item); +CREATE INDEX IF NOT EXISTS q_name_create_and_is_deleted_index on queue (queue_name, created_at, is_deleted); + +CREATE TRIGGER update_queue_updated_at + BEFORE UPDATE + ON queue + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/29_drop_unencrypted_email.down.sql b/server/migrations/29_drop_unencrypted_email.down.sql new file mode 100644 index 000000000..a574cc91e --- /dev/null +++ b/server/migrations/29_drop_unencrypted_email.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE users + ADD COLUMN email TEXT UNIQUE; diff --git a/server/migrations/29_drop_unencrypted_email.up.sql b/server/migrations/29_drop_unencrypted_email.up.sql new file mode 100644 index 000000000..1ff46464c --- /dev/null +++ b/server/migrations/29_drop_unencrypted_email.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE users + DROP COLUMN email; diff --git a/server/migrations/2_create_subscriptions.down.sql b/server/migrations/2_create_subscriptions.down.sql new file mode 100644 index 000000000..301b4dc9d --- /dev/null +++ b/server/migrations/2_create_subscriptions.down.sql @@ -0,0 +1,5 @@ +DROP TABLE subscriptions; + +DROP TABLE subscription_logs; + +DROP INDEX subscriptions_user_id_index; diff --git a/server/migrations/2_create_subscriptions.up.sql b/server/migrations/2_create_subscriptions.up.sql new file mode 100644 index 000000000..e1d55c5f4 --- /dev/null +++ b/server/migrations/2_create_subscriptions.up.sql @@ -0,0 +1,29 @@ +CREATE TABLE IF NOT EXISTS subscriptions ( + subscription_id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + storage_in_mbs BIGINT NOT NULL, + original_transaction_id TEXT NOT NULL, + expiry_time BIGINT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + + CONSTRAINT fk_subscriptions_user_id + FOREIGN KEY(user_id) + 
REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS subscription_logs ( + log_id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + payment_provider TEXT NOT NULL, + notification JSONB NOT NULL, + verification_response JSONB NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + + CONSTRAINT fk_subscription_logs_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS subscriptions_user_id_index ON subscriptions(user_id); diff --git a/server/migrations/30_add_magic_metadata.up.sql b/server/migrations/30_add_magic_metadata.up.sql new file mode 100644 index 000000000..fef903649 --- /dev/null +++ b/server/migrations/30_add_magic_metadata.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + ADD COLUMN magic_metadata JSONB; diff --git a/server/migrations/30_drop_magic_metadata.down.sql b/server/migrations/30_drop_magic_metadata.down.sql new file mode 100644 index 000000000..e9f5b1b18 --- /dev/null +++ b/server/migrations/30_drop_magic_metadata.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + DROP COLUMN magic_metadata; diff --git a/server/migrations/31_add_pub_magic_metadata.up.sql b/server/migrations/31_add_pub_magic_metadata.up.sql new file mode 100644 index 000000000..1fb65f63b --- /dev/null +++ b/server/migrations/31_add_pub_magic_metadata.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + ADD COLUMN pub_magic_metadata JSONB; diff --git a/server/migrations/31_drop_pub_magic_metadata.down.sql b/server/migrations/31_drop_pub_magic_metadata.down.sql new file mode 100644 index 000000000..9b3e3c62e --- /dev/null +++ b/server/migrations/31_drop_pub_magic_metadata.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + DROP COLUMN pub_magic_metadata; diff --git a/server/migrations/32_add_trash_table.down.sql b/server/migrations/32_add_trash_table.down.sql new file mode 100644 index 000000000..03f7ed614 --- /dev/null +++ b/server/migrations/32_add_trash_table.down.sql @@ -0,0 +1,4 @@ +DROP TRIGGER IF EXISTS update_trash_updated_at ON trash; +ALTER TABLE trash DROP CONSTRAINT IF EXISTS trash_state_constraint; +DROP INDEX trash_updated_at_time_index; +DROP TABLE IF EXISTS trash; diff --git a/server/migrations/32_add_trash_table.up.sql b/server/migrations/32_add_trash_table.up.sql new file mode 100644 index 000000000..8625271b7 --- /dev/null +++ b/server/migrations/32_add_trash_table.up.sql @@ -0,0 +1,30 @@ +CREATE TABLE IF NOT EXISTS trash +( + file_id BIGINT NOT NULL, + user_id BIGINT NOT NULL, + collection_id BIGINT NOT NULL, + -- is_deleted true indicates file has been deleted and cannot be restored. + is_deleted bool NOT NULL DEFAULT false, + -- true indicates file was moved to trash but user restored it before deletion. 
+ is_restored bool NOT NULL default false, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + delete_by bigint NOT NULL, + PRIMARY KEY (file_id), + CONSTRAINT fk_trash_keys_collection_files + FOREIGN KEY (file_id, collection_id) + REFERENCES collection_files (file_id, collection_id) + ON DELETE NO ACTION +); + +CREATE INDEX IF NOT EXISTS trash_updated_at_time_index ON trash (updated_at); + +ALTER TABLE trash + ADD CONSTRAINT trash_state_constraint CHECK (is_deleted is FALSE or is_restored is FALSE); + +CREATE TRIGGER update_trash_updated_at + BEFORE UPDATE + ON trash + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/33_change_update_at_column_trigger.down.sql b/server/migrations/33_change_update_at_column_trigger.down.sql new file mode 100644 index 000000000..ddf82c942 --- /dev/null +++ b/server/migrations/33_change_update_at_column_trigger.down.sql @@ -0,0 +1,9 @@ +create or replace function trigger_updated_at_microseconds_column() returns trigger + language plpgsql +as +$$ +BEGIN + NEW.updated_at = now_utc_micro_seconds(); + RETURN NEW; +END; +$$; diff --git a/server/migrations/33_change_update_at_column_trigger.up.sql b/server/migrations/33_change_update_at_column_trigger.up.sql new file mode 100644 index 000000000..f7f0681ae --- /dev/null +++ b/server/migrations/33_change_update_at_column_trigger.up.sql @@ -0,0 +1,16 @@ +-- Replace the existing trigger to honor the new value of updated_at if it's greater than the old updated_at. -- The equals check handles the case where the application modifies only certain properties of a row (not updated_at), like the file's restored or deleted flag. In such cases, the old row's and the new row's timestamps will be the same, so the trigger bumps the timestamp to the current time. -- The greater-than case handles the application setting an updated_at timestamp lower than the current timestamp. Ideally, the version should always increase; otherwise the diff on the client will fail. 
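-- A minimal illustrative sketch of the resulting behavior (an editor-added example, not
-- part of the original migration; it assumes the trash table from migration 32):
--
--   UPDATE trash SET is_restored = TRUE WHERE file_id = 42;
--   -- updated_at was not supplied, so OLD.updated_at = NEW.updated_at and the
--   -- trigger bumps NEW.updated_at to now_utc_micro_seconds()
--
--   UPDATE trash SET updated_at = now_utc_micro_seconds() + 10 WHERE file_id = 42;
--   -- NEW.updated_at > OLD.updated_at, so the supplied (larger) value is kept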
+ +create or replace function trigger_updated_at_microseconds_column() returns trigger + language plpgsql +as +$$ +BEGIN + IF OLD.updated_at >= NEW.updated_at THEN + NEW.updated_at = now_utc_micro_seconds(); + END IF; + RETURN NEW; +END; +$$; diff --git a/server/migrations/34_trash_delete_by_idx.down.sql b/server/migrations/34_trash_delete_by_idx.down.sql new file mode 100644 index 000000000..5978014de --- /dev/null +++ b/server/migrations/34_trash_delete_by_idx.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS trash_delete_by_idx; diff --git a/server/migrations/34_trash_delete_by_idx.up.sql b/server/migrations/34_trash_delete_by_idx.up.sql new file mode 100644 index 000000000..6cf3476f9 --- /dev/null +++ b/server/migrations/34_trash_delete_by_idx.up.sql @@ -0,0 +1,2 @@ +CREATE INDEX trash_delete_by_idx ON trash (delete_by) + WHERE (trash.is_deleted is FALSE AND trash.is_restored is FALSE); diff --git a/server/migrations/35_add_push_tokens_table.down.sql b/server/migrations/35_add_push_tokens_table.down.sql new file mode 100644 index 000000000..d62464cfe --- /dev/null +++ b/server/migrations/35_add_push_tokens_table.down.sql @@ -0,0 +1,3 @@ +DROP TRIGGER IF EXISTS update_push_tokens_updated_at ON push_tokens; +DROP INDEX IF EXISTS push_tokens_last_notified_at_index; +DROP TABLE IF EXISTS push_tokens; diff --git a/server/migrations/35_add_push_tokens_table.up.sql b/server/migrations/35_add_push_tokens_table.up.sql new file mode 100644 index 000000000..2e8bd2a98 --- /dev/null +++ b/server/migrations/35_add_push_tokens_table.up.sql @@ -0,0 +1,23 @@ +CREATE TABLE IF NOT EXISTS push_tokens +( + user_id BIGINT NOT NULL, + fcm_token TEXT NOT NULL, + apns_token TEXT, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + last_notified_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + PRIMARY KEY (fcm_token), + CONSTRAINT fk_push_tokens_user_id + FOREIGN KEY (user_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS push_tokens_last_notified_at_index ON push_tokens (last_notified_at); + +CREATE TRIGGER update_push_tokens_updated_at + BEFORE UPDATE + ON push_tokens + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/36_update_key_attr_and_tokens.down.sql b/server/migrations/36_update_key_attr_and_tokens.down.sql new file mode 100644 index 000000000..e6b445ee8 --- /dev/null +++ b/server/migrations/36_update_key_attr_and_tokens.down.sql @@ -0,0 +1,7 @@ +ALTER TABLE key_attributes + DROP COLUMN created_at; + +ALTER TABLE tokens + DROP COLUMN is_deleted; +ALTER TABLE tokens + DROP COLUMN last_used_at; diff --git a/server/migrations/36_update_key_attr_and_tokens.up.sql b/server/migrations/36_update_key_attr_and_tokens.up.sql new file mode 100644 index 000000000..60dc4fa83 --- /dev/null +++ b/server/migrations/36_update_key_attr_and_tokens.up.sql @@ -0,0 +1,25 @@ +BEGIN; +ALTER TABLE key_attributes + ADD COLUMN IF NOT EXISTS created_at bigint DEFAULT now_utc_micro_seconds(); +UPDATE key_attributes k +SET created_at = u.creation_time +FROM users u +where k.user_id = u.user_id; + +ALTER TABLE key_attributes + ALTER COLUMN created_at SET NOT NULL; +COMMIT; + +BEGIN; +ALTER table tokens + ADD COLUMN IF NOT EXISTS is_deleted bool DEFAULT FALSE, + ADD COLUMN IF NOT EXISTS last_used_at bigint DEFAULT now_utc_micro_seconds(); + +UPDATE tokens +SET last_used_at = creation_time, + is_deleted = FALSE; + +ALTER TABLE tokens + ALTER COLUMN is_deleted SET NOT 
NULL, + ALTER COLUMN last_used_at SET NOT NULL; +COMMIT; diff --git a/server/migrations/37_public_collection.down.sql b/server/migrations/37_public_collection.down.sql new file mode 100644 index 000000000..2677fe532 --- /dev/null +++ b/server/migrations/37_public_collection.down.sql @@ -0,0 +1,13 @@ +DROP TABLE IF EXISTS public_abuse_report; +DROP INDEX IF EXISTS public_abuse_share_id_idx; + +DROP TABLE IF EXISTS public_collection_access_history; +DROP INDEX IF EXISTS public_access_share_id_idx; + + +DROP TABLE IF EXISTS public_collection_tokens; +DROP INDEX IF EXISTS public_access_tokens_unique_idx; +DROP INDEX IF EXISTS public_active_collection_unique_idx; + +DROP TRIGGER IF EXISTS update_public_collection_tokens_updated_at on public_collection_tokens; +DROP TRIGGER IF EXISTS trigger_collection_updation_time_on_collection_tokens_updated on public_collection_tokens; diff --git a/server/migrations/37_public_collection.up.sql b/server/migrations/37_public_collection.up.sql new file mode 100644 index 000000000..66c911913 --- /dev/null +++ b/server/migrations/37_public_collection.up.sql @@ -0,0 +1,78 @@ +CREATE TABLE IF NOT EXISTS public_collection_tokens +( + id bigint primary key generated always as identity, + collection_id BIGINT NOT NULL, + access_token TEXT NOT NULL, + is_disabled bool not null DEFAULT FALSE, + -- 0 value for valid_till indicates that the link never expires. + valid_till bigint not null DEFAULT 0, + -- 0 device limit indicates no limit + device_limit int not null DEFAULT 0, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT fk_public_tokens_collection_id + FOREIGN KEY (collection_id) + REFERENCES collections (collection_id) + ON DELETE CASCADE +); + +CREATE UNIQUE INDEX IF NOT EXISTS public_active_collection_unique_idx ON public_collection_tokens (collection_id, is_disabled) WHERE is_disabled = FALSE; +CREATE UNIQUE INDEX IF NOT EXISTS public_access_tokens_unique_idx ON public_collection_tokens (access_token); + +CREATE TABLE IF NOT EXISTS public_collection_access_history +( + share_id bigint, + ip text not null, + user_agent text not null, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT unique_access_sid_ip_ua UNIQUE (share_id, ip, user_agent), + CONSTRAINT fk_public_history_token_id + FOREIGN KEY (share_id) + REFERENCES public_collection_tokens (id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS public_access_share_id_idx ON public_collection_access_history (share_id); + +CREATE TABLE IF NOT EXISTS public_abuse_report +( + share_id bigint, + ip text not null, + user_agent text not null, + url text not null, + reason text not null, + u_comment varchar(10000) not null DEFAULT '', + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT unique_report_sid_ip_ua UNIQUE (share_id, ip, user_agent), + CONSTRAINT fk_public_abuse_report_token_id + FOREIGN KEY (share_id) + REFERENCES public_collection_tokens (id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS public_abuse_share_id_idx ON public_abuse_report (share_id); + +CREATE TRIGGER update_public_collection_tokens_updated_at + BEFORE UPDATE + ON public_collection_tokens + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + +CREATE OR REPLACE FUNCTION fn_update_collections_updation_time_using_update_at() RETURNS TRIGGER AS $$ +BEGIN + -- + IF (TG_OP = 'UPDATE' OR TG_OP = 'INSERT') THEN + UPDATE collections SET updation_time = NEW.updated_at where 
collection_id = new.collection_id and + updation_time < New.updated_at; + RETURN NEW; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_collection_updation_time_on_collection_tokens_updated + AFTER INSERT OR UPDATE + ON public_collection_tokens + FOR EACH ROW +EXECUTE PROCEDURE + fn_update_collections_updation_time_using_update_at(); \ No newline at end of file diff --git a/server/migrations/38_add_abuse_report_detail_json.down.sql b/server/migrations/38_add_abuse_report_detail_json.down.sql new file mode 100644 index 000000000..315036204 --- /dev/null +++ b/server/migrations/38_add_abuse_report_detail_json.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE public_abuse_report + DROP COLUMN details, + ADD COLUMN u_comment varchar(10000) not null DEFAULT ''; diff --git a/server/migrations/38_add_abuse_report_detail_json.up.sql b/server/migrations/38_add_abuse_report_detail_json.up.sql new file mode 100644 index 000000000..b9d3e626d --- /dev/null +++ b/server/migrations/38_add_abuse_report_detail_json.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE public_abuse_report + ADD COLUMN details JSONB, + DROP COLUMN u_comment; diff --git a/server/migrations/39_drop_file_and_thumbnail_object_keys.down.sql b/server/migrations/39_drop_file_and_thumbnail_object_keys.down.sql new file mode 100644 index 000000000..c5cff6c92 --- /dev/null +++ b/server/migrations/39_drop_file_and_thumbnail_object_keys.down.sql @@ -0,0 +1 @@ +--- nothing diff --git a/server/migrations/39_drop_file_and_thumbnail_object_keys.up.sql b/server/migrations/39_drop_file_and_thumbnail_object_keys.up.sql new file mode 100644 index 000000000..9e14d2cad --- /dev/null +++ b/server/migrations/39_drop_file_and_thumbnail_object_keys.up.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS file_object_keys; + +DROP TABLE IF EXISTS thumbnail_object_keys; diff --git a/server/migrations/3_add_encypted_collection_name.down.sql b/server/migrations/3_add_encypted_collection_name.down.sql new file mode 100644 index 000000000..aa4faf573 --- /dev/null +++ b/server/migrations/3_add_encypted_collection_name.down.sql @@ -0,0 +1,4 @@ +ALTER TABLE collections + DROP COLUMN encrypted_name + DROP COLUMN name_decryption_nonce + ALTER COLUMN name SET NOT NULL; diff --git a/server/migrations/3_add_encypted_collection_name.up.sql b/server/migrations/3_add_encypted_collection_name.up.sql new file mode 100644 index 000000000..733897ae8 --- /dev/null +++ b/server/migrations/3_add_encypted_collection_name.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE collections + ADD COLUMN encrypted_name TEXT, + ADD COLUMN name_decryption_nonce TEXT, + ALTER COLUMN name DROP NOT NULL; diff --git a/server/migrations/40_advance_url_settings.down.sql b/server/migrations/40_advance_url_settings.down.sql new file mode 100644 index 000000000..30b2ba043 --- /dev/null +++ b/server/migrations/40_advance_url_settings.down.sql @@ -0,0 +1,13 @@ +BEGIN; +ALTER table public_collection_tokens + DROP COLUMN IF EXISTS pw_hash, + DROP COLUMN IF EXISTS pw_nonce, + DROP COLUMN IF EXISTS mem_limit, + DROP COLUMN IF EXISTS ops_limit, + DROP COLUMN IF EXISTS enable_download, + DROP COLUMN IF EXISTS enable_comment; + + +ALTER TABLE public_collection_tokens + DROP CONSTRAINT IF EXISTS pct_pw_state_constraint; +COMMIT; diff --git a/server/migrations/40_advance_url_settings.up.sql b/server/migrations/40_advance_url_settings.up.sql new file mode 100644 index 000000000..afddf007c --- /dev/null +++ b/server/migrations/40_advance_url_settings.up.sql @@ -0,0 +1,21 @@ +BEGIN; +ALTER table public_collection_tokens + ADD COLUMN IF NOT EXISTS 
pw_hash TEXT, + ADD COLUMN IF NOT EXISTS pw_nonce TEXT, + ADD COLUMN IF NOT EXISTS mem_limit BIGINT, + ADD COLUMN IF NOT EXISTS ops_limit BIGINT, + ADD COLUMN IF NOT EXISTS enable_download bool DEFAULT TRUE, + ADD COLUMN IF NOT EXISTS enable_comment bool DEFAULT FALSE; + +UPDATE public_collection_tokens +SET enable_download = TRUE, + enable_comment = FALSE; + +ALTER TABLE public_collection_tokens + ALTER COLUMN enable_download SET NOT NULL, + ALTER COLUMN enable_comment SET NOT NULL; + +ALTER TABLE public_collection_tokens + ADD CONSTRAINT pct_pw_state_constraint CHECK ( (pw_hash is NULL and pw_nonce is NULL) or + (pw_hash is NOT NULL and pw_nonce is NOT NULL)); +COMMIT; diff --git a/server/migrations/41_add_file_info.down.sql b/server/migrations/41_add_file_info.down.sql new file mode 100644 index 000000000..21f7982f0 --- /dev/null +++ b/server/migrations/41_add_file_info.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + DROP COLUMN info; diff --git a/server/migrations/41_add_file_info.up.sql b/server/migrations/41_add_file_info.up.sql new file mode 100644 index 000000000..5b2ca06db --- /dev/null +++ b/server/migrations/41_add_file_info.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE files + ADD COLUMN info JSONB; diff --git a/server/migrations/42_collection_add_magic_metadata.up.sql b/server/migrations/42_collection_add_magic_metadata.up.sql new file mode 100644 index 000000000..9949a106e --- /dev/null +++ b/server/migrations/42_collection_add_magic_metadata.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE collections + ADD COLUMN magic_metadata JSONB; diff --git a/server/migrations/42_collection_drop_magic_metadata.down.sql b/server/migrations/42_collection_drop_magic_metadata.down.sql new file mode 100644 index 000000000..ded2f46e9 --- /dev/null +++ b/server/migrations/42_collection_drop_magic_metadata.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE collections + DROP COLUMN magic_metadata; diff --git a/server/migrations/43_family_plan.down.sql b/server/migrations/43_family_plan.down.sql new file mode 100644 index 000000000..a542b39a3 --- /dev/null +++ b/server/migrations/43_family_plan.down.sql @@ -0,0 +1,7 @@ +ALTER TABLE users + DROP COLUMN family_admin_id; +DROP TRIGGER IF EXISTS update_families_updated_at ON families; +DROP INDEX IF EXISTS fk_families_admin_id; +DROP INDEX IF EXISTS uidx_one_family_check; +DROP INDEX IF EXISTS uidx_families_member_mapping; +DROP TABLE families; diff --git a/server/migrations/43_family_plan.up.sql b/server/migrations/43_family_plan.up.sql new file mode 100644 index 000000000..896a89a19 --- /dev/null +++ b/server/migrations/43_family_plan.up.sql @@ -0,0 +1,44 @@ +ALTER TABLE users + ADD COLUMN family_admin_id BIGINT; + +CREATE TABLE IF NOT EXISTS families +( + id uuid PRIMARY KEY NOT NULL, + admin_id BIGINT NOT NULL, + member_id BIGINT NOT NULL, +-- status indicates the member status +-- SELF/CLOSED are the state of the admin member when they create a family group or close it. 
+ status TEXT NOT NULL CHECK (status IN + ('SELF', 'CLOSED', 'INVITED', 'ACCEPTED', 'DECLINED', 'REVOKED', 'REMOVED', + 'LEFT')), + token TEXT UNIQUE, + percentage INTEGER NOT NULL DEFAULT -1, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT fk_family_admin_id + FOREIGN KEY (admin_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS fk_families_admin_id ON families (admin_id); + +-- check to ensure that the member is not part of or is admin of another family group +CREATE UNIQUE INDEX uidx_one_family_check on families (member_id, status) where status in ('ACCEPTED', 'SELF'); + +-- index to ensure that there's only one entry for admin and member. +CREATE UNIQUE INDEX uidx_families_member_mapping on families (admin_id, member_id); + +ALTER TABLE families + ADD CONSTRAINT families_member_state_constraint CHECK ( + (admin_id != member_id and status not in ('SELF','CLOSED') or (admin_id = member_id and status in ('SELF', 'CLOSED')))); + + +CREATE TRIGGER update_families_updated_at + BEFORE UPDATE + ON families + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + diff --git a/server/migrations/44_trash_add_index.down.sql b/server/migrations/44_trash_add_index.down.sql new file mode 100644 index 000000000..a9b032bf5 --- /dev/null +++ b/server/migrations/44_trash_add_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS trash_user_id_and_updated_at_index; \ No newline at end of file diff --git a/server/migrations/44_trash_add_index.up.sql b/server/migrations/44_trash_add_index.up.sql new file mode 100644 index 000000000..3d800734d --- /dev/null +++ b/server/migrations/44_trash_add_index.up.sql @@ -0,0 +1 @@ +CREATE INDEX IF NOT EXISTS trash_user_id_and_updated_at_index ON trash (user_id, updated_at); \ No newline at end of file diff --git a/server/migrations/45_collection_files_file_id_idx.down.sql b/server/migrations/45_collection_files_file_id_idx.down.sql new file mode 100644 index 000000000..17a5af281 --- /dev/null +++ b/server/migrations/45_collection_files_file_id_idx.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS collection_files_file_id_index; diff --git a/server/migrations/45_collection_files_file_id_idx.up.sql b/server/migrations/45_collection_files_file_id_idx.up.sql new file mode 100644 index 000000000..6b49cc6a4 --- /dev/null +++ b/server/migrations/45_collection_files_file_id_idx.up.sql @@ -0,0 +1 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS collection_files_file_id_index ON collection_files (file_id); diff --git a/server/migrations/46_replication_v2.down.sql b/server/migrations/46_replication_v2.down.sql new file mode 100644 index 000000000..6278b15c2 --- /dev/null +++ b/server/migrations/46_replication_v2.down.sql @@ -0,0 +1,2 @@ +-- noop since we can't remove a value from an enum: https://www.postgresql.org/docs/current/datatype-enum.html +-- and a migration is expensive diff --git a/server/migrations/46_replication_v2.up.sql b/server/migrations/46_replication_v2.up.sql new file mode 100644 index 000000000..156879168 --- /dev/null +++ b/server/migrations/46_replication_v2.up.sql @@ -0,0 +1 @@ +ALTER TYPE s3region ADD VALUE 'scw-eu-fr-locked'; diff --git a/server/migrations/47_wasabi_integration.down.sql b/server/migrations/47_wasabi_integration.down.sql new file mode 100644 index 000000000..6278b15c2 --- /dev/null +++ b/server/migrations/47_wasabi_integration.down.sql @@ -0,0 +1,2 @@ +-- noop since we can't remove a value from an 
enum: https://www.postgresql.org/docs/current/datatype-enum.html +-- and a migration is expensive diff --git a/server/migrations/47_wasabi_integration.up.sql b/server/migrations/47_wasabi_integration.up.sql new file mode 100644 index 000000000..ed76ee6aa --- /dev/null +++ b/server/migrations/47_wasabi_integration.up.sql @@ -0,0 +1 @@ +ALTER TYPE s3region ADD VALUE 'wasabi-eu-central-2'; diff --git a/server/migrations/48_add_stripe_account.down.sql b/server/migrations/48_add_stripe_account.down.sql new file mode 100644 index 000000000..aa7b7fb92 --- /dev/null +++ b/server/migrations/48_add_stripe_account.down.sql @@ -0,0 +1,4 @@ +UPDATE + subscriptions +SET + attributes = attributes - 'stripeAccountCountry'; diff --git a/server/migrations/48_add_stripe_account.up.sql b/server/migrations/48_add_stripe_account.up.sql new file mode 100644 index 000000000..472bd3f88 --- /dev/null +++ b/server/migrations/48_add_stripe_account.up.sql @@ -0,0 +1,10 @@ +UPDATE + subscriptions +SET + attributes = jsonb_set( + attributes, + '{stripeAccountCountry}', + '"IN"' + ) +WHERE + payment_provider = 'stripe'; diff --git a/server/migrations/49_add_unique_idx_txn_id.down.sql b/server/migrations/49_add_unique_idx_txn_id.down.sql new file mode 100644 index 000000000..83c95d7f1 --- /dev/null +++ b/server/migrations/49_add_unique_idx_txn_id.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS sub_original_txn_id_index; diff --git a/server/migrations/49_add_unique_idx_txn_id.up.sql b/server/migrations/49_add_unique_idx_txn_id.up.sql new file mode 100644 index 000000000..f2426e7ce --- /dev/null +++ b/server/migrations/49_add_unique_idx_txn_id.up.sql @@ -0,0 +1,3 @@ +CREATE UNIQUE INDEX IF NOT EXISTS sub_original_txn_id_index +ON subscriptions (original_transaction_id) +WHERE original_transaction_id is not null and original_transaction_id != 'none'; diff --git a/server/migrations/4_update_subscriptions_schema.down.sql b/server/migrations/4_update_subscriptions_schema.down.sql new file mode 100644 index 000000000..051b1463c --- /dev/null +++ b/server/migrations/4_update_subscriptions_schema.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE subscriptions + DROP COLUMN product_id, + DROP COLUMN payment_provider, + DROP COLUMN latest_verification_data; + +DROP INDEX subscriptions_expiry_time_index; diff --git a/server/migrations/4_update_subscriptions_schema.up.sql b/server/migrations/4_update_subscriptions_schema.up.sql new file mode 100644 index 000000000..09112d0a3 --- /dev/null +++ b/server/migrations/4_update_subscriptions_schema.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE subscriptions + ADD COLUMN product_id TEXT NOT NULL, + ADD COLUMN payment_provider TEXT NOT NULL, + ADD COLUMN latest_verification_data TEXT NOT NULL; + +CREATE INDEX IF NOT EXISTS subscriptions_expiry_time_index ON subscriptions (expiry_time); diff --git a/server/migrations/50_remote_store.down.sql b/server/migrations/50_remote_store.down.sql new file mode 100644 index 000000000..b4ed36746 --- /dev/null +++ b/server/migrations/50_remote_store.down.sql @@ -0,0 +1,2 @@ +DROP TRIGGER IF EXISTS update_remote_store_updated_at ON remote_store; +DROP TABLE IF EXISTS remote_store; diff --git a/server/migrations/50_remote_store.up.sql b/server/migrations/50_remote_store.up.sql new file mode 100644 index 000000000..8493f81bc --- /dev/null +++ b/server/migrations/50_remote_store.up.sql @@ -0,0 +1,22 @@ +CREATE TABLE IF NOT EXISTS remote_store +( + user_id BIGINT NOT NULL, + key_name TEXT NOT NULL, + key_value TEXT NOT NULL, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + 
updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + PRIMARY KEY (user_id, key_name), + CONSTRAINT fk_remote_store_user_id + FOREIGN KEY (user_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + + +CREATE TRIGGER update_remote_store_updated_at + BEFORE UPDATE + ON remote_store + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + diff --git a/server/migrations/51_th_time_human.down.sql b/server/migrations/51_th_time_human.down.sql new file mode 100644 index 000000000..23b09ebf2 --- /dev/null +++ b/server/migrations/51_th_time_human.down.sql @@ -0,0 +1 @@ +DROP function th(bigint); diff --git a/server/migrations/51_th_time_human.up.sql b/server/migrations/51_th_time_human.up.sql new file mode 100644 index 000000000..843961ee8 --- /dev/null +++ b/server/migrations/51_th_time_human.up.sql @@ -0,0 +1,9 @@ +create or replace function th(epochTimeInMicroSeconds bigint) +returns TIMESTAMP WITH TIME ZONE +language plpgsql +as +$$ +begin + return to_timestamp(cast(epochTimeInMicroSeconds/1000000 as bigint)); +end; +$$; diff --git a/server/migrations/52_authenticator.down.sql b/server/migrations/52_authenticator.down.sql new file mode 100644 index 000000000..0d3971080 --- /dev/null +++ b/server/migrations/52_authenticator.down.sql @@ -0,0 +1,6 @@ +DROP TRIGGER IF EXISTS update_authenticator_entity_updated_at ON authenticator_entity; +DROP INDEX IF EXISTS authenticator_entity_updated_at_time_index; +DROP INDEX IF EXISTS authenticator_entity_state_constraint; +DROP TRIGGER IF EXISTS trigger_authenticator_key_updated_time_on_authenticator_entity_updation on authenticator_entity; +DROP TABLE IF EXISTS authenticator_entity; +DROP TABLE IF EXISTS authenticator_key; diff --git a/server/migrations/52_authenticator.up.sql b/server/migrations/52_authenticator.up.sql new file mode 100644 index 000000000..79f94180a --- /dev/null +++ b/server/migrations/52_authenticator.up.sql @@ -0,0 +1,57 @@ + + CREATE TABLE IF NOT EXISTS authenticator_key ( + user_id BIGINT PRIMARY KEY NOT NULL, + encrypted_key TEXT NOT NULL, + header TEXT NOT NULL, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT fk_authenticator_key_user_id FOREIGN KEY (user_id) REFERENCES users ( + user_id) ON DELETE CASCADE + ); + + + CREATE TABLE IF NOT EXISTS authenticator_entity + ( + id uuid PRIMARY KEY NOT NULL, + user_id BIGINT NOT NULL, + encrypted_data TEXT, + header TEXT, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + is_deleted BOOLEAN DEFAULT FALSE, + CONSTRAINT fk_authenticator_key_user_id FOREIGN KEY (user_id) REFERENCES authenticator_key ( + user_id) ON DELETE CASCADE + ); + +CREATE INDEX IF NOT EXISTS authenticator_entity_updated_at_time_index ON authenticator_entity (user_id, updated_at); + +ALTER TABLE authenticator_entity + ADD CONSTRAINT authenticator_entity_state_constraint CHECK ((is_deleted is TRUE AND encrypted_data IS NULL) or (is_deleted is FALSE AND encrypted_data IS NOT NULL)); + +CREATE TRIGGER update_authenticator_entity_updated_at + BEFORE UPDATE + ON authenticator_entity + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + + +-- This function updates the authenticator_key updated_at if the relevant authenticator entry is changed +CREATE OR REPLACE FUNCTION fn_update_authenticator_key_updated_at_via_updated_at() RETURNS TRIGGER AS $$ +BEGIN + -- + IF (TG_OP = 'UPDATE' OR TG_OP = 'INSERT') THEN
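+        -- Bump the parent authenticator_key row's updated_at so clients can poll a single per-user timestamp to detect changes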
+ UPDATE authenticator_key SET updated_at = NEW.updated_at where user_id = NEW.user_id and + updated_at < NEW.updated_at; + RETURN NEW; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_authenticator_key_updated_time_on_authenticator_entity_updation + AFTER INSERT OR UPDATE + ON authenticator_entity + FOR EACH ROW +EXECUTE PROCEDURE + fn_update_authenticator_key_updated_at_via_updated_at(); + diff --git a/server/migrations/53_add_apps.down.sql b/server/migrations/53_add_apps.down.sql new file mode 100644 index 000000000..4087e67a5 --- /dev/null +++ b/server/migrations/53_add_apps.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE otts DROP COLUMN app; + +ALTER TABLE tokens DROP COLUMN app; + +DROP TYPE app; diff --git a/server/migrations/53_add_apps.up.sql b/server/migrations/53_add_apps.up.sql new file mode 100644 index 000000000..a3fea4f7e --- /dev/null +++ b/server/migrations/53_add_apps.up.sql @@ -0,0 +1,5 @@ +CREATE TYPE app AS ENUM ('photos','auth'); + +ALTER TABLE tokens ADD COLUMN app app NOT NULL DEFAULT 'photos'; + +ALTER TABLE otts ADD COLUMN app app NOT NULL DEFAULT 'photos'; diff --git a/server/migrations/54_update_public_tokens.down.sql b/server/migrations/54_update_public_tokens.down.sql new file mode 100644 index 000000000..a579253c4 --- /dev/null +++ b/server/migrations/54_update_public_tokens.down.sql @@ -0,0 +1 @@ +ALTER TABLE public_collection_tokens DROP COLUMN IF EXISTS enable_collect; diff --git a/server/migrations/54_update_public_tokens.up.sql b/server/migrations/54_update_public_tokens.up.sql new file mode 100644 index 000000000..e08defcef --- /dev/null +++ b/server/migrations/54_update_public_tokens.up.sql @@ -0,0 +1 @@ +ALTER TABLE public_collection_tokens ADD COLUMN enable_collect bool NOT NULL DEFAULT FALSE; diff --git a/server/migrations/55_extend_subs.down.sql b/server/migrations/55_extend_subs.down.sql new file mode 100644 index 000000000..63a493410 --- /dev/null +++ b/server/migrations/55_extend_subs.down.sql @@ -0,0 +1 @@ +-- do nothing diff --git a/server/migrations/55_extend_subs.up.sql b/server/migrations/55_extend_subs.up.sql new file mode 100644 index 000000000..d783ead4e --- /dev/null +++ b/server/migrations/55_extend_subs.up.sql @@ -0,0 +1,6 @@ +update subscriptions + set expiry_time = 1704067200000000 -- 01.01.2024 + where + product_id = 'free' and + storage = 1073741824 and -- ignore those whose plans we upgraded manually + expiry_time < 1672531200000000; -- 01.01.2023 diff --git a/server/migrations/56_add_uncategorized_type_constraint.down.sql b/server/migrations/56_add_uncategorized_type_constraint.down.sql new file mode 100644 index 000000000..885ba4285 --- /dev/null +++ b/server/migrations/56_add_uncategorized_type_constraint.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS collections_uncategorized_constraint_index; diff --git a/server/migrations/56_add_uncategorized_type_constraint.up.sql b/server/migrations/56_add_uncategorized_type_constraint.up.sql new file mode 100644 index 000000000..c3882781c --- /dev/null +++ b/server/migrations/56_add_uncategorized_type_constraint.up.sql @@ -0,0 +1,3 @@ +-- Migration script for collections with a bad type, caused by a bug on the mobile client and missing validation on the server +update collections set type='album' where type='CollectionType.album'; +CREATE UNIQUE INDEX IF NOT EXISTS collections_uncategorized_constraint_index ON collections (owner_id) WHERE (type = 'uncategorized'); diff --git a/server/migrations/57_object_copies.down.sql b/server/migrations/57_object_copies.down.sql new file mode 100644 index 
000000000..b5b2824fb --- /dev/null +++ b/server/migrations/57_object_copies.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS object_copies; diff --git a/server/migrations/57_object_copies.up.sql b/server/migrations/57_object_copies.up.sql new file mode 100644 index 000000000..4832bcb65 --- /dev/null +++ b/server/migrations/57_object_copies.up.sql @@ -0,0 +1,26 @@ +CREATE TABLE IF NOT EXISTS object_copies ( + object_key TEXT PRIMARY KEY, + b2 BIGINT, + want_b2 BOOLEAN, + wasabi BIGINT, + want_wasabi BOOLEAN, + scw BIGINT, + want_scw BOOLEAN, + last_attempt BIGINT NOT NULL DEFAULT 0, + CONSTRAINT fk_object_copies_object_key FOREIGN KEY (object_key) + REFERENCES object_keys (object_key) ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS object_copies_wasabi_null_index +ON object_copies (wasabi) WHERE wasabi IS NULL AND want_wasabi = true; + +CREATE INDEX IF NOT EXISTS object_copies_scw_null_index +ON object_copies (scw) WHERE scw IS NULL AND want_scw = true; + +-- object_copies serves as a queue of objects that still need to be +-- replicated. However, the canonical source of truth for an object is still +-- maintained in the original object_keys table. +-- +-- Add types for the new dcs that are introduced as part of replication v3. +ALTER TYPE s3region ADD VALUE 'wasabi-eu-central-2-v3'; +ALTER TYPE s3region ADD VALUE 'scw-eu-fr-v3'; diff --git a/server/migrations/58_update_ott_unique_constraint.down.sql b/server/migrations/58_update_ott_unique_constraint.down.sql new file mode 100644 index 000000000..94eb4c779 --- /dev/null +++ b/server/migrations/58_update_ott_unique_constraint.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE + otts DROP CONSTRAINT unique_otts_emailhash_ott; + +ALTER TABLE + otts +ADD + CONSTRAINT otts_ott_key UNIQUE (ott); + \ No newline at end of file diff --git a/server/migrations/58_update_ott_unique_constraint.up.sql b/server/migrations/58_update_ott_unique_constraint.up.sql new file mode 100644 index 000000000..10bfb457c --- /dev/null +++ b/server/migrations/58_update_ott_unique_constraint.up.sql @@ -0,0 +1,8 @@ +ALTER TABLE + otts DROP CONSTRAINT otts_ott_key; + +ALTER TABLE + otts +ADD + CONSTRAINT unique_otts_emailhash_ott UNIQUE (ott, email_hash); + \ No newline at end of file diff --git a/server/migrations/59_delete_data.down.sql b/server/migrations/59_delete_data.down.sql new file mode 100644 index 000000000..bd7d936e6 --- /dev/null +++ b/server/migrations/59_delete_data.down.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS data_cleanup; +DROP TYPE IF EXISTS stage_enum; diff --git a/server/migrations/59_delete_data.up.sql b/server/migrations/59_delete_data.up.sql new file mode 100644 index 000000000..f45d90e53 --- /dev/null +++ b/server/migrations/59_delete_data.up.sql @@ -0,0 +1,23 @@ +CREATE TYPE stage_enum AS ENUM ('scheduled', 'collection', 'trash', 'storage', 'completed'); + +CREATE TABLE IF NOT EXISTS data_cleanup +( + user_id BIGINT PRIMARY KEY, + stage stage_enum NOT NULL DEFAULT 'scheduled', + stage_schedule_time BIGINT NOT NULL DEFAULT now_utc_micro_seconds() + (7 * 24::BIGINT * 60 * 60 * 1000 * 1000), + stage_attempt_count int NOT NULL DEFAULT 0, + status TEXT NOT NULL DEFAULT '', + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds() +); + +insert into data_cleanup(user_id, stage_schedule_time) (select u.user_id, + GREATEST(max(t.last_used_at) + + (7::BIGINT * 24 * 60 * 60 * 1000 * 1000), + now_utc_micro_seconds()) + from users u + left join tokens t + on t.user_id = u.user_id + where 
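+          -- accounts whose deletion has already begun: email scrubbed to NULL and email_hash tagged with 'deleted'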
u.encrypted_email is NULL + and u.email_hash like '%deleted%' + group by u.user_id); diff --git a/server/migrations/5_rename_storage_in_mbs_to_storage.down.sql b/server/migrations/5_rename_storage_in_mbs_to_storage.down.sql new file mode 100644 index 000000000..e7540c48a --- /dev/null +++ b/server/migrations/5_rename_storage_in_mbs_to_storage.down.sql @@ -0,0 +1 @@ +ALTER TABLE subscriptions RENAME storage TO storage_in_mbs; diff --git a/server/migrations/5_rename_storage_in_mbs_to_storage.up.sql b/server/migrations/5_rename_storage_in_mbs_to_storage.up.sql new file mode 100644 index 000000000..e4c13a672 --- /dev/null +++ b/server/migrations/5_rename_storage_in_mbs_to_storage.up.sql @@ -0,0 +1 @@ +ALTER TABLE subscriptions RENAME storage_in_mbs TO storage; diff --git a/server/migrations/60_add_columns_cf.down.sql b/server/migrations/60_add_columns_cf.down.sql new file mode 100644 index 000000000..d29b8bcc4 --- /dev/null +++ b/server/migrations/60_add_columns_cf.down.sql @@ -0,0 +1,8 @@ +ALTER TABLE collection_files + DROP COLUMN IF EXISTS c_owner_id, + DROP COLUMN IF EXISTS f_owner_id, + DROP COLUMN IF EXISTS created_at; + +ALTER TABLE collection_shares DROP COLUMN IF EXISTS role_type; + +DROP TYPE IF EXISTS role_enum; diff --git a/server/migrations/60_add_columns_cf.up.sql b/server/migrations/60_add_columns_cf.up.sql new file mode 100644 index 000000000..9de28ea9b --- /dev/null +++ b/server/migrations/60_add_columns_cf.up.sql @@ -0,0 +1,12 @@ +ALTER TABLE collection_files + ADD COLUMN IF NOT EXISTS c_owner_id bigint, + ADD COLUMN IF NOT EXISTS f_owner_id bigint, + ADD COLUMN IF NOT EXISTS created_at bigint; +-- set the default after adding the column; otherwise all existing rows would end up with a wrong created_at time +ALTER TABLE collection_files + ALTER created_at SET DEFAULT now_utc_micro_seconds(); + +CREATE TYPE role_enum AS ENUM ('VIEWER', 'COLLABORATOR', 'OWNER'); + +ALTER TABLE collection_shares + ADD COLUMN IF NOT EXISTS role_type role_enum DEFAULT 'VIEWER'; diff --git a/server/migrations/61_storage_bonus.down.sql b/server/migrations/61_storage_bonus.down.sql new file mode 100644 index 000000000..1e975b560 --- /dev/null +++ b/server/migrations/61_storage_bonus.down.sql @@ -0,0 +1,3 @@ +DROP TABLE IF EXISTS referral_codes; +DROP TABLE IF EXISTS referral_tracking; +DROP TABLE IF EXISTS storage_bonus; diff --git a/server/migrations/61_storage_bonus.up.sql b/server/migrations/61_storage_bonus.up.sql new file mode 100644 index 000000000..42e7a3239 --- /dev/null +++ b/server/migrations/61_storage_bonus.up.sql @@ -0,0 +1,46 @@ +CREATE TABLE IF NOT EXISTS referral_codes +( + code VARCHAR(255) NOT NULL, + user_id BIGINT NOT NULL, + is_active BOOLEAN NOT NULL DEFAULT TRUE, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + PRIMARY KEY (code) +); +--- Ensure that only one active referral code is allowed per user +CREATE UNIQUE INDEX ON referral_codes (user_id, is_active) WHERE (referral_codes.is_active = TRUE); + +CREATE TABLE IF NOT EXISTS referral_tracking +( + invitor_id bigint NOT NULL, + invitee_id bigint NOT NULL, + plan_type text NOT NULL check ( plan_type in ('10_GB_ON_UPGRADE')), + invitee_on_paid_plan bool default false, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds() +); +--- Add a unique index on the invitee_id column in the referral_tracking table to ensure only +-- one entry per invitee is allowed +CREATE UNIQUE INDEX IF NOT EXISTS referral_tracking_invitee_id_idx ON referral_tracking (invitee_id); + + +CREATE TABLE IF NOT EXISTS storage_bonus 
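+-- Ledger of extra storage granted to a user (referral rewards, sign-up bonuses, and the like)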
+( + --- bonus_id is a unique identifier for each storage bonus. It is generated by the application layer. + --- The format of the bonus_id will depend on the type of bonus. + --- For example, if the type is 'REFERRAL', the bonus_id can be (REFERRAL-inviteeId). This will ensure that there's + -- only one bonus for a given invitee. Similarly for other types, the application layer decides the bonus_id format. + bonus_id text primary key NOT NULL, + type text NOT NULL CHECK (type IN + ('REFERRAL', 'SIGN_UP', 'ANNIVERSARY')), + user_id bigint NOT NULL, + storage bigint NOT NULL, + valid_till bigint NOT NULL DEFAULT 0, + revoke_reason text, + is_revoked boolean NOT NULL DEFAULT FALSE, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at bigint NOT NULL DEFAULT now_utc_micro_seconds() +); + +CREATE INDEX IF NOT EXISTS storage_bonus_user_id_idx ON storage_bonus (user_id); + + + diff --git a/server/migrations/62_entity_store.down.sql b/server/migrations/62_entity_store.down.sql new file mode 100644 index 000000000..8a8c16490 --- /dev/null +++ b/server/migrations/62_entity_store.down.sql @@ -0,0 +1,6 @@ +DROP TRIGGER IF EXISTS update_entity_data_updated_at ON entity_data; +DROP INDEX IF EXISTS entity_data_updated_at_time_index; +DROP INDEX IF EXISTS entity_data_state_constraint; +DROP TRIGGER IF EXISTS trigger_entity_key_on_entity_data_updation on entity_data; +DROP TABLE IF EXISTS entity_data; +DROP TABLE IF EXISTS entity_key; diff --git a/server/migrations/62_entity_store.up.sql b/server/migrations/62_entity_store.up.sql new file mode 100644 index 000000000..3d9d4803d --- /dev/null +++ b/server/migrations/62_entity_store.up.sql @@ -0,0 +1,64 @@ +CREATE TABLE IF NOT EXISTS entity_key +( + user_id BIGINT NOT NULL, + type TEXT NOT NULL, + encrypted_key TEXT NOT NULL, + header TEXT NOT NULL, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + PRIMARY KEY (user_id, type), + CONSTRAINT fk_entity_key_user_id FOREIGN KEY (user_id) REFERENCES users ( + user_id) ON DELETE CASCADE +); + + +CREATE TABLE IF NOT EXISTS entity_data +( + id uuid PRIMARY KEY NOT NULL, + user_id BIGINT NOT NULL, + type TEXT NOT NULL, + encrypted_data TEXT, + header TEXT, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + is_deleted BOOLEAN DEFAULT FALSE, + CONSTRAINT fk_entity_key_user_id_and_type FOREIGN KEY (user_id, type) REFERENCES entity_key (user_id, type) ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS entity_data_updated_at_time_index ON entity_data (user_id, updated_at); + +ALTER TABLE entity_data + ADD CONSTRAINT entity_data_state_constraint CHECK ((is_deleted is TRUE AND encrypted_data IS NULL) or + (is_deleted is FALSE AND encrypted_data IS NOT NULL)); + +CREATE TRIGGER update_entity_data_updated_at + BEFORE UPDATE + ON entity_data + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + + +-- This function updates the entity_key updated_at if the relevant entity_data is changed
CREATE OR REPLACE FUNCTION fn_update_entity_key_updated_at_via_updated_at() RETURNS TRIGGER AS +$$ +BEGIN + -- + IF (TG_OP = 'UPDATE' OR TG_OP = 'INSERT') THEN + UPDATE entity_key + SET updated_at = NEW.updated_at + where user_id = NEW.user_id + and type = NEW.type + and updated_at < NEW.updated_at; + RETURN NEW; + END IF; +END; +$$ LANGUAGE plpgsql; + +CREATE TRIGGER trigger_entity_key_on_entity_data_updation + AFTER INSERT OR UPDATE + ON entity_data + 
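+    -- fires on every entity_data insert/update so that entity_key.updated_at stays fresh for diff-based sync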
FOR EACH ROW +EXECUTE PROCEDURE + fn_update_entity_key_updated_at_via_updated_at(); + diff --git a/server/migrations/63_add_kex_store.down.sql b/server/migrations/63_add_kex_store.down.sql new file mode 100644 index 000000000..107c0fe90 --- /dev/null +++ b/server/migrations/63_add_kex_store.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS kex_store; \ No newline at end of file diff --git a/server/migrations/63_add_kex_store.up.sql b/server/migrations/63_add_kex_store.up.sql new file mode 100644 index 000000000..ee68a9c6e --- /dev/null +++ b/server/migrations/63_add_kex_store.up.sql @@ -0,0 +1,7 @@ +CREATE TABLE IF NOT EXISTS kex_store ( + id TEXT UNIQUE PRIMARY KEY NOT NULL, + user_id BIGINT NOT NULL, + wrapped_key TEXT NOT NULL, + added_at BIGINT NOT NULL, + CONSTRAINT fk_kex_store_user_id FOREIGN KEY(user_id) REFERENCES users(user_id) ON DELETE CASCADE +); \ No newline at end of file diff --git a/server/migrations/64_collection_drop_pub_magic_metadata.down.sql b/server/migrations/64_collection_drop_pub_magic_metadata.down.sql new file mode 100644 index 000000000..8ce911074 --- /dev/null +++ b/server/migrations/64_collection_drop_pub_magic_metadata.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE collections + DROP COLUMN pub_magic_metadata; diff --git a/server/migrations/64_collection_pub_magic_metadata.up.sql b/server/migrations/64_collection_pub_magic_metadata.up.sql new file mode 100644 index 000000000..9959fa72e --- /dev/null +++ b/server/migrations/64_collection_pub_magic_metadata.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE collections + ADD COLUMN pub_magic_metadata JSONB; diff --git a/server/migrations/65_shared_collection_drop_magic_metadata.down.sql b/server/migrations/65_shared_collection_drop_magic_metadata.down.sql new file mode 100644 index 000000000..0d96e8a57 --- /dev/null +++ b/server/migrations/65_shared_collection_drop_magic_metadata.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE collection_shares + DROP COLUMN magic_metadata; diff --git a/server/migrations/65_shared_collection_magic_metadata.up.sql b/server/migrations/65_shared_collection_magic_metadata.up.sql new file mode 100644 index 000000000..d484c360b --- /dev/null +++ b/server/migrations/65_shared_collection_magic_metadata.up.sql @@ -0,0 +1,2 @@ +ALTER TABLE collection_shares + ADD COLUMN magic_metadata JSONB; diff --git a/server/migrations/66_add_srp_attributes.down.sql b/server/migrations/66_add_srp_attributes.down.sql new file mode 100644 index 000000000..ca84bde67 --- /dev/null +++ b/server/migrations/66_add_srp_attributes.down.sql @@ -0,0 +1,3 @@ +DROP TABLE temp_srp_setup; +DROP TABLE srp_auth; +DROP TABLE srp_sessions; diff --git a/server/migrations/66_add_srp_attributes.up.sql b/server/migrations/66_add_srp_attributes.up.sql new file mode 100644 index 000000000..14118597f --- /dev/null +++ b/server/migrations/66_add_srp_attributes.up.sql @@ -0,0 +1,41 @@ +-- The temp_srp_setup table below is used to store the SRP salt and verifier during +-- the SRP registration process or when the user changes their password. +-- Once the user has verified their email address, the salt and verifier +-- are copied to the srp_auth table.
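+-- As an illustrative sketch (comment only, nothing here is executed by this
+-- migration; the actual copy is performed by the application layer in Go),
+-- the promotion amounts to:
+--   INSERT INTO srp_auth (user_id, srp_user_id, salt, verifier)
+--   SELECT user_id, srp_user_id, salt, verifier
+--   FROM temp_srp_setup
+--   WHERE id = <the setup id that was verified>;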
+CREATE TABLE IF NOT EXISTS srp_auth ( + user_id BIGINT PRIMARY KEY NOT NULL, + srp_user_id uuid NOT NULL UNIQUE, + salt TEXT NOT NULL, + verifier TEXT NOT NULL, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT fk_srp_auth_user_id + FOREIGN KEY (user_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS temp_srp_setup ( + id uuid PRIMARY KEY NOT NULL, + session_id uuid NOT NULL, + srp_user_id uuid NOT NULL, + user_id BIGINT NOT NULL, + salt TEXT NOT NULL, + verifier TEXT NOT NULL, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT fk_temp_srp_setup_user_id + FOREIGN KEY (user_id) + REFERENCES users (user_id) + ON DELETE CASCADE +); + +CREATE TABLE IF NOT EXISTS srp_sessions ( + id uuid PRIMARY KEY NOT NULL, + srp_user_id uuid NOT NULL, + server_key TEXT NOT NULL, + srp_a TEXT NOT NULL, + has_verified BOOLEAN NOT NULL DEFAULT false, + attempt_count INT NOT NULL DEFAULT 0, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds() +); diff --git a/server/migrations/67_add_email_mfa_column.up.sql b/server/migrations/67_add_email_mfa_column.up.sql new file mode 100644 index 000000000..53c31b2bd --- /dev/null +++ b/server/migrations/67_add_email_mfa_column.up.sql @@ -0,0 +1,10 @@ +--- Add an email_mfa bool column to the users table, back-filled with the negation of is_two_factor_enabled +--- (users without 2FA keep email MFA on); alter the column to NOT NULL after back-filling data + +ALTER TABLE users + ADD COLUMN IF NOT EXISTS email_mfa boolean DEFAULT false; +UPDATE users +SET email_mfa = NOT is_two_factor_enabled; +ALTER TABLE users + ALTER COLUMN email_mfa SET NOT NULL; + diff --git a/server/migrations/67_email_mfa_column.down.sql b/server/migrations/67_email_mfa_column.down.sql new file mode 100644 index 000000000..c24d7ce81 --- /dev/null +++ b/server/migrations/67_email_mfa_column.down.sql @@ -0,0 +1,2 @@ +ALTER TABLE users + DROP COLUMN IF EXISTS email_mfa; diff --git a/server/migrations/68_add_locker_changes.down.sql b/server/migrations/68_add_locker_changes.down.sql new file mode 100644 index 000000000..8d66fb94b --- /dev/null +++ b/server/migrations/68_add_locker_changes.down.sql @@ -0,0 +1,20 @@ +-- Drop the new unique index + +DROP INDEX IF EXISTS collections_uncategorized_constraint_index_v2; + +-- Re-create the old unique index + +CREATE UNIQUE INDEX IF NOT EXISTS collections_uncategorized_constraint_index ON collections (owner_id, app) +WHERE (type = 'uncategorized'); + +-- Remove NOT NULL constraints + +ALTER TABLE collections ALTER COLUMN app DROP NOT NULL; + +-- Remove default values + +ALTER TABLE collections ALTER COLUMN app DROP DEFAULT; + +-- Update columns back to NULL + +UPDATE collections SET app = NULL WHERE app = 'photos'; diff --git a/server/migrations/68_add_locker_changes.up.sql b/server/migrations/68_add_locker_changes.up.sql new file mode 100644 index 000000000..b836240a8 --- /dev/null +++ b/server/migrations/68_add_locker_changes.up.sql @@ -0,0 +1,22 @@ +ALTER TYPE app ADD VALUE 'locker'; + +-- Add the app column, defaulting to 'photos' + +ALTER TABLE collections ADD COLUMN app app DEFAULT 'photos'; + +-- Back-fill rows where app is NULL with the default ('photos') before making the column non-null.
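+-- (Back-filling first ensures the SET NOT NULL below succeeds for pre-existing rows.)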
+ +UPDATE collections SET app = 'photos' WHERE app IS NULL; + +-- Alter the column to make it non-null + +ALTER TABLE collections ALTER COLUMN app SET NOT NULL; + +-- Create a new unique index for uncategorized collections + +CREATE UNIQUE INDEX IF NOT EXISTS collections_uncategorized_constraint_index_v2 ON collections (owner_id, app) +WHERE (type = 'uncategorized'); + +-- Drop the older index if it exists + +DROP INDEX IF EXISTS collections_uncategorized_constraint_index; \ No newline at end of file diff --git a/server/migrations/69_add_srp_updated_at_trigger.down.sql b/server/migrations/69_add_srp_updated_at_trigger.down.sql new file mode 100644 index 000000000..d31b2ce81 --- /dev/null +++ b/server/migrations/69_add_srp_updated_at_trigger.down.sql @@ -0,0 +1,3 @@ +DROP TRIGGER IF EXISTS update_srp_auth_updated_at ON srp_auth; + +DROP TRIGGER IF EXISTS update_srp_sessions_updated_at ON srp_sessions; diff --git a/server/migrations/69_add_srp_updated_at_trigger.up.sql b/server/migrations/69_add_srp_updated_at_trigger.up.sql new file mode 100644 index 000000000..81880b9cc --- /dev/null +++ b/server/migrations/69_add_srp_updated_at_trigger.up.sql @@ -0,0 +1,13 @@ +CREATE TRIGGER update_srp_auth_updated_at + BEFORE UPDATE + ON srp_auth + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); + +CREATE TRIGGER update_srp_sessions_updated_at + BEFORE UPDATE + ON srp_sessions + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/6_create_usage.down.sql b/server/migrations/6_create_usage.down.sql new file mode 100644 index 000000000..c2e85cd2d --- /dev/null +++ b/server/migrations/6_create_usage.down.sql @@ -0,0 +1,4 @@ +DROP INDEX usage_user_id_index; + +DROP TABLE usage; + diff --git a/server/migrations/6_create_usage.up.sql b/server/migrations/6_create_usage.up.sql new file mode 100644 index 000000000..cb48f405a --- /dev/null +++ b/server/migrations/6_create_usage.up.sql @@ -0,0 +1,29 @@ +CREATE TABLE IF NOT EXISTS usage( + user_id INTEGER NOT NULL, + storage_consumed BIGINT NOT NULL, + + CONSTRAINT fk_usage_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS usage_user_id_index ON usage(user_id); + +INSERT INTO usage(user_id,storage_consumed) + SELECT user_id, COALESCE(total_file_size+total_thumbnail_size,0) FROM + users, + LATERAL ( + SELECT SUM(size) AS total_thumbnail_size + FROM thumbnail_object_keys + LEFT JOIN files ON files.file_id = thumbnail_object_keys.file_id + WHERE + owner_id = users.user_id + ) query_1, + LATERAL ( + SELECT SUM(size) AS total_file_size + FROM file_object_keys + LEFT JOIN files ON files.file_id = file_object_keys.file_id + WHERE + owner_id = users.user_id + ) query_2 diff --git a/server/migrations/70_add_embeddings.down.sql b/server/migrations/70_add_embeddings.down.sql new file mode 100644 index 000000000..54c8a26c8 --- /dev/null +++ b/server/migrations/70_add_embeddings.down.sql @@ -0,0 +1,4 @@ +DROP TRIGGER IF EXISTS update_embeddings_updated_at ON embeddings; +DROP TABLE embeddings; +DROP TYPE model; +DROP INDEX IF EXISTS embeddings_owner_id_updated_at_index; diff --git a/server/migrations/70_add_embeddings.up.sql b/server/migrations/70_add_embeddings.up.sql new file mode 100644 index 000000000..ff4fd1ef3 --- /dev/null +++ b/server/migrations/70_add_embeddings.up.sql @@ -0,0 +1,25 @@ +CREATE TYPE model AS ENUM ('ggml-clip'); + +CREATE TABLE IF NOT EXISTS embeddings( + file_id BIGINT NOT NULL, + owner_id BIGINT NOT NULL, + 
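+    -- which embedding model produced this row; values come from the model enum created above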
model model NOT NULL, + encrypted_embedding TEXT NOT NULL, + decryption_header TEXT NOT NULL, + updated_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + CONSTRAINT unique_embeddings_file_id_model + UNIQUE (file_id, model), + CONSTRAINT fk_embeddings_file_id + FOREIGN KEY (file_id) + REFERENCES files (file_id) + ON DELETE CASCADE +); + +CREATE INDEX IF NOT EXISTS embeddings_owner_id_updated_at_index ON embeddings (owner_id, updated_at); + +CREATE TRIGGER update_embeddings_updated_at + BEFORE UPDATE + ON embeddings + FOR EACH ROW +EXECUTE PROCEDURE + trigger_updated_at_microseconds_column(); diff --git a/server/migrations/71_user_source_attr.down.sql b/server/migrations/71_user_source_attr.down.sql new file mode 100644 index 000000000..d0f42f7d2 --- /dev/null +++ b/server/migrations/71_user_source_attr.down.sql @@ -0,0 +1,6 @@ +-- Drop the source column if it exists +ALTER TABLE users + DROP COLUMN IF EXISTS source; +-- Drop the delete_feedback column if it exists +ALTER TABLE users + DROP COLUMN IF EXISTS delete_feedback; diff --git a/server/migrations/71_user_source_attr.up.sql b/server/migrations/71_user_source_attr.up.sql new file mode 100644 index 000000000..430abaa3a --- /dev/null +++ b/server/migrations/71_user_source_attr.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE users + ADD COLUMN IF NOT EXISTS source text; +-- Add JSON column to capture delete feedback +ALTER TABLE users + ADD COLUMN IF NOT EXISTS delete_feedback jsonb; diff --git a/server/migrations/72_add_on_storage.down.sql b/server/migrations/72_add_on_storage.down.sql new file mode 100644 index 000000000..4d68ea7ef --- /dev/null +++ b/server/migrations/72_add_on_storage.down.sql @@ -0,0 +1,6 @@ +ALTER TABLE storage_bonus + DROP CONSTRAINT IF EXISTS storage_bonus_type_check; + +ALTER TABLE storage_bonus + ADD CONSTRAINT storage_bonus_type_check + CHECK (type IN ('REFERRAL', 'SIGN_UP', 'ANNIVERSARY')); \ No newline at end of file diff --git a/server/migrations/72_add_on_storage.up.sql b/server/migrations/72_add_on_storage.up.sql new file mode 100644 index 000000000..cdc0ab98f --- /dev/null +++ b/server/migrations/72_add_on_storage.up.sql @@ -0,0 +1,6 @@ +ALTER TABLE storage_bonus + DROP CONSTRAINT IF EXISTS storage_bonus_type_check; + +ALTER TABLE storage_bonus + ADD CONSTRAINT storage_bonus_type_check + CHECK (type IN ('REFERRAL', 'SIGN_UP', 'ANNIVERSARY', 'ADD_ON_BF_2023', 'ADD_ON_SUPPORT')); \ No newline at end of file diff --git a/server/migrations/73_remove_kex_user_id_fk.down.sql b/server/migrations/73_remove_kex_user_id_fk.down.sql new file mode 100644 index 000000000..198ea4005 --- /dev/null +++ b/server/migrations/73_remove_kex_user_id_fk.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE kex_store ADD COLUMN user_id BIGINT NOT NULL; + +ALTER TABLE kex_store +ADD + CONSTRAINT fk_kex_store_user_id FOREIGN KEY (user_id) REFERENCES users(user_id) ON DELETE CASCADE; \ No newline at end of file diff --git a/server/migrations/73_remove_kex_user_id_fk.up.sql b/server/migrations/73_remove_kex_user_id_fk.up.sql new file mode 100644 index 000000000..332f6e2fd --- /dev/null +++ b/server/migrations/73_remove_kex_user_id_fk.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE kex_store DROP CONSTRAINT IF EXISTS fk_kex_store_user_id; + +ALTER TABLE kex_store DROP COLUMN IF EXISTS user_id; \ No newline at end of file diff --git a/server/migrations/74_add_onnx.down.sql b/server/migrations/74_add_onnx.down.sql new file mode 100644 index 000000000..6278b15c2 --- /dev/null +++ b/server/migrations/74_add_onnx.down.sql @@ -0,0 +1,2 @@ +-- noop since we can't remove a value from an 
enum: https://www.postgresql.org/docs/current/datatype-enum.html +-- and a migration is expensive diff --git a/server/migrations/74_add_onnx.up.sql b/server/migrations/74_add_onnx.up.sql new file mode 100644 index 000000000..eea405c61 --- /dev/null +++ b/server/migrations/74_add_onnx.up.sql @@ -0,0 +1 @@ +ALTER TYPE model ADD VALUE 'onnx-clip'; diff --git a/server/migrations/75_add_passkeys.down.sql b/server/migrations/75_add_passkeys.down.sql new file mode 100644 index 000000000..fb58ad6a6 --- /dev/null +++ b/server/migrations/75_add_passkeys.down.sql @@ -0,0 +1,5 @@ +DROP TABLE IF EXISTS webauthn_sessions; + +DROP TABLE IF EXISTS passkey_credentials; + +DROP TABLE IF EXISTS passkeys; \ No newline at end of file diff --git a/server/migrations/75_add_passkeys.up.sql b/server/migrations/75_add_passkeys.up.sql new file mode 100644 index 000000000..92db268c2 --- /dev/null +++ b/server/migrations/75_add_passkeys.up.sql @@ -0,0 +1,61 @@ +CREATE TABLE + IF NOT EXISTS passkeys( + id uuid PRIMARY KEY NOT NULL, + user_id BIGINT NOT NULL, + friendly_name TEXT NOT NULL, + deleted_at BIGINT, + created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + +CONSTRAINT fk_passkeys_user_id FOREIGN KEY(user_id) REFERENCES users(user_id) ON DELETE CASCADE +); + +CREATE TABLE + IF NOT EXISTS passkey_credentials( + passkey_id uuid PRIMARY KEY NOT NULL, + + credential_id TEXT NOT NULL UNIQUE, + +-- credential info + +-- []byte data will be encoded in b64 before being inserted into the DB + +-- fields that are arrays will be comma-separated strings + +-- structs will be encoded into JSON before being inserted into DB (they don't need to be queried anyway) + +public_key TEXT NOT NULL, +-- binary data +attestation_type TEXT NOT NULL, +authenticator_transports TEXT NOT NULL, +-- array +credential_flags TEXT NOT NULL, +-- struct +authenticator TEXT NOT NULL, +-- struct + +created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + +CONSTRAINT fk_passkey_credentials_passkey_id FOREIGN KEY(passkey_id) REFERENCES passkeys(id) ON DELETE CASCADE +); + +CREATE TABLE + IF NOT EXISTS webauthn_sessions( + id uuid PRIMARY KEY NOT NULL, + +challenge TEXT NOT NULL UNIQUE, + +user_id BIGINT NOT NULL, +-- this is meant to be []byte but we'll store it as plain text +allowed_credential_ids TEXT NOT NULL, +-- this is [][]byte, but we'll encode it to b64 to store in db +expires_at bigint NOT NULL, +-- this is time.Time but we'll encode it as a unix timestamp + +user_verification_requirement TEXT NOT NULL, +extensions TEXT NOT NULL, +-- this is a map[string]interface{} but we'll just store it as json + +created_at bigint NOT NULL DEFAULT now_utc_micro_seconds(), + +CONSTRAINT fk_webauthn_sessions_user_id FOREIGN KEY(user_id) REFERENCES users(user_id) ON DELETE CASCADE +); \ No newline at end of file diff --git a/server/migrations/76_add_passkey_login_sessions.down.sql b/server/migrations/76_add_passkey_login_sessions.down.sql new file mode 100644 index 000000000..1aa52759a --- /dev/null +++ b/server/migrations/76_add_passkey_login_sessions.down.sql @@ -0,0 +1 @@ +DROP TABLE IF EXISTS passkey_login_sessions; \ No newline at end of file diff --git a/server/migrations/76_add_passkey_login_sessions.up.sql b/server/migrations/76_add_passkey_login_sessions.up.sql new file mode 100644 index 000000000..de5a51aba --- /dev/null +++ b/server/migrations/76_add_passkey_login_sessions.up.sql @@ -0,0 +1,10 @@ +CREATE TABLE IF NOT EXISTS passkey_login_sessions( + user_id BIGINT NOT NULL, + session_id TEXT UNIQUE NOT NULL, + creation_time BIGINT NOT 
NULL, + expiration_time BIGINT NOT NULL, + CONSTRAINT fk_passkey_login_sessions_user_id + FOREIGN KEY(user_id) + REFERENCES users(user_id) + ON DELETE CASCADE +); \ No newline at end of file diff --git a/server/migrations/77_embeddings_table_update.down.sql b/server/migrations/77_embeddings_table_update.down.sql new file mode 100644 index 000000000..26b14cc69 --- /dev/null +++ b/server/migrations/77_embeddings_table_update.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE embeddings + ALTER COLUMN encrypted_embedding SET NOT NULL, + ALTER COLUMN decryption_header SET NOT NULL; diff --git a/server/migrations/77_embeddings_table_update.up.sql b/server/migrations/77_embeddings_table_update.up.sql new file mode 100644 index 000000000..790de4a96 --- /dev/null +++ b/server/migrations/77_embeddings_table_update.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE embeddings + ALTER COLUMN encrypted_embedding DROP NOT NULL, + ALTER COLUMN decryption_header DROP NOT NULL; diff --git a/server/migrations/78_cast.down.sql b/server/migrations/78_cast.down.sql new file mode 100644 index 000000000..5ef1b1df1 --- /dev/null +++ b/server/migrations/78_cast.down.sql @@ -0,0 +1,2 @@ +DROP TABLE IF EXISTS casting; +DROP INDEX IF EXISTS casting_code_unique_idx; diff --git a/server/migrations/78_cast.up.sql b/server/migrations/78_cast.up.sql new file mode 100644 index 000000000..602db3b9a --- /dev/null +++ b/server/migrations/78_cast.up.sql @@ -0,0 +1,16 @@ +-- note: using casting as table name because cast is a reserved word in postgres +CREATE TABLE IF NOT EXISTS casting +( + id uuid not null primary key, + code VARCHAR(16) NOT NULL, + public_key VARCHAR(512) NOT NULL, + collection_id BIGINT, + cast_user BIGINT, + encrypted_payload text, + token VARCHAR(512), + is_deleted BOOLEAN DEFAULT FALSE, + created_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds(), + last_used_at BIGINT NOT NULL DEFAULT now_utc_micro_seconds() +); +-- create a unique constraint on codes that are not deleted +CREATE UNIQUE INDEX IF NOT EXISTS casting_code_unique_idx ON casting (code) WHERE is_deleted = FALSE; diff --git a/server/migrations/79_queue_index.down.sql b/server/migrations/79_queue_index.down.sql new file mode 100644 index 000000000..9edfa6f2f --- /dev/null +++ b/server/migrations/79_queue_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS idx_queue_created_at_non_deleted; diff --git a/server/migrations/79_queue_index.up.sql b/server/migrations/79_queue_index.up.sql new file mode 100644 index 000000000..917e4f7a0 --- /dev/null +++ b/server/migrations/79_queue_index.up.sql @@ -0,0 +1,3 @@ +-- create partial index on non-deleted queue items as we always query for non-deleted entries +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_queue_created_at_non_deleted ON queue (queue_name, created_at) + WHERE is_deleted = false; diff --git a/server/migrations/7_add_unique_usage_user_id.down.sql b/server/migrations/7_add_unique_usage_user_id.down.sql new file mode 100644 index 000000000..9b333e54f --- /dev/null +++ b/server/migrations/7_add_unique_usage_user_id.down.sql @@ -0,0 +1 @@ +DROP INDEX usage_user_id_unique_constraint_index; diff --git a/server/migrations/7_add_unique_usage_user_id.up.sql b/server/migrations/7_add_unique_usage_user_id.up.sql new file mode 100644 index 000000000..dc0c85af4 --- /dev/null +++ b/server/migrations/7_add_unique_usage_user_id.up.sql @@ -0,0 +1 @@ +ALTER TABLE usage ADD CONSTRAINT usage_user_id_unique_constraint_index UNIQUE (user_id); diff --git a/server/migrations/8_update_otts_schema_link_with_email.down.sql 
b/server/migrations/8_update_otts_schema_link_with_email.down.sql new file mode 100644 index 000000000..bda9b99d8 --- /dev/null +++ b/server/migrations/8_update_otts_schema_link_with_email.down.sql @@ -0,0 +1,11 @@ +ALTER TABLE otts + ADD COLUMN user_id INTEGER; + +UPDATE otts + SET user_id = (SELECT user_id + FROM users + WHERE users.email=otts.email); + +ALTER TABLE otts + DROP COLUMN email, + ALTER COLUMN user_id SET NOT NULL; diff --git a/server/migrations/8_update_otts_schema_link_with_email.up.sql b/server/migrations/8_update_otts_schema_link_with_email.up.sql new file mode 100644 index 000000000..053a1f6f9 --- /dev/null +++ b/server/migrations/8_update_otts_schema_link_with_email.up.sql @@ -0,0 +1,11 @@ +ALTER TABLE otts + ADD COLUMN email TEXT; + +UPDATE otts + SET email = (SELECT email + FROM users + WHERE users.user_id=otts.user_id); + +ALTER TABLE otts + DROP COLUMN user_id, + ALTER COLUMN email SET NOT NULL; diff --git a/server/migrations/9_create_datacenter.down.sql b/server/migrations/9_create_datacenter.down.sql new file mode 100644 index 000000000..582f3ad5f --- /dev/null +++ b/server/migrations/9_create_datacenter.down.sql @@ -0,0 +1,11 @@ +DROP TYPE s3region; + +ALTER TABLE file_object_keys + DROP COLUMN datacenters; + +ALTER TABLE thumbnail_object_keys + DROP COLUMN datacenters; + +DROP TABLE task_lock; + +DROP TABLE queue; diff --git a/server/migrations/9_create_datacenter.up.sql b/server/migrations/9_create_datacenter.up.sql new file mode 100644 index 000000000..c7d31e3e5 --- /dev/null +++ b/server/migrations/9_create_datacenter.up.sql @@ -0,0 +1,26 @@ +CREATE TYPE s3region AS ENUM ('b2-eu-cen','scw-eu-fr'); + +ALTER TABLE thumbnail_object_keys + ADD COLUMN datacenters s3region[] DEFAULT '{}'; + +UPDATE thumbnail_object_keys SET datacenters = '{b2-eu-cen}'; + +ALTER TABLE file_object_keys + ADD COLUMN datacenters s3region[] DEFAULT '{}'; + +UPDATE file_object_keys SET datacenters = '{b2-eu-cen}'; + +CREATE TABLE IF NOT EXISTS task_lock ( + task_name TEXT PRIMARY KEY, + lock_until BIGINT NOT NULL, + locked_at BIGINT NOT NULL, + locked_by TEXT NOT NULL +); + +CREATE INDEX IF NOT EXISTS task_lock_locked_until ON task_lock(lock_until); + +CREATE TABLE IF NOT EXISTS queue ( + queue_id SERIAL PRIMARY KEY, + queue_name TEXT NOT NULL, + item TEXT NOT NULL +); diff --git a/server/pkg/api/admin.go b/server/pkg/api/admin.go new file mode 100644 index 000000000..56838ff1a --- /dev/null +++ b/server/pkg/api/admin.go @@ -0,0 +1,429 @@ +package api + +import ( + "errors" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/ente-io/museum/pkg/controller/family" + + "github.com/ente-io/museum/pkg/repo/storagebonus" + + gTime "time" + + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/controller/user" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/gin-contrib/requestid" + "github.com/sirupsen/logrus" + + "github.com/ente-io/museum/pkg/utils/crypto" + "github.com/ente-io/stacktrace" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + emailUtil "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/gin-gonic/gin" +) + +// AdminHandler exposes request handlers for all admin related requests +type AdminHandler struct { + UserRepo *repo.UserRepository + CollectionRepo *repo.CollectionRepository + UserAuthRepo *repo.UserAuthRepository + FileRepo *repo.FileRepository + 
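+	// BillingRepo backs the subscription lookups used when composing admin views (see attachSubscription below)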
BillingRepo *repo.BillingRepository + StorageBonusRepo *storagebonus.Repository + BillingController *controller.BillingController + UserController *user.UserController + FamilyController *family.Controller + ObjectCleanupController *controller.ObjectCleanupController + MailingListsController *controller.MailingListsController + DiscordController *discord.DiscordController + HashingKey []byte + PasskeyController *controller.PasskeyController +} + +// Duration for which an admin's token is considered valid +const AdminTokenValidityInMinutes = 10 + +func (h *AdminHandler) SendMail(c *gin.Context) { + var req ente.SendEmailRequest + err := c.ShouldBindJSON(&req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err = emailUtil.Send(req.To, req.FromName, req.FromEmail, req.Subject, req.Body, nil) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) SubscribeMail(c *gin.Context) { + email := c.Query("email") + err := h.MailingListsController.Subscribe(email) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +func (h *AdminHandler) UnsubscribeMail(c *gin.Context) { + email := c.Query("email") + err := h.MailingListsController.Unsubscribe(email) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +func (h *AdminHandler) GetUsers(c *gin.Context) { + err := h.isFreshAdminToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + sinceTime, err := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + users, err := h.UserRepo.GetAll(sinceTime, time.Microseconds()) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"users": users}) +} + +func (h *AdminHandler) GetUser(c *gin.Context) { + e := c.Query("email") + if e == "" { + id, err := strconv.ParseInt(c.Query("id"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + user, err := h.UserRepo.GetUserByIDInternal(id) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + response := gin.H{ + "user": user, + } + h.attachSubscription(c, user.ID, response) + c.JSON(http.StatusOK, response) + return + } + emailHash, err := crypto.GetHash(e, h.HashingKey) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + user, err := h.UserRepo.GetUserByEmailHash(emailHash) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + user.Email = e + response := gin.H{ + "user": user, + } + h.attachSubscription(c, user.ID, response) + c.JSON(http.StatusOK, response) +} + +func (h *AdminHandler) DeleteUser(c *gin.Context) { + err := h.isFreshAdminToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + email := c.Query("email") + email = strings.TrimSpace(email) + if email == "" { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "email id is missing")) + return + } + emailHash, err := crypto.GetHash(email, h.HashingKey) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + user, err := h.UserRepo.GetUserByEmailHash(emailHash) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + adminID := auth.GetUserID(c.Request.Header) + logger := 
logrus.WithFields(logrus.Fields{ + "user_id": user.ID, + "admin_id": adminID, + "user_email": email, + "req_id": requestid.Get(c), + "req_ctx": "account_deletion", + }) + response, err := h.UserController.HandleAccountDeletion(c, user.ID, logger) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) deleting account for %d", adminID, user.ID)) + c.JSON(http.StatusOK, response) +} + +func (h *AdminHandler) isFreshAdminToken(c *gin.Context) error { + token := auth.GetToken(c) + creationTime, err := h.UserAuthRepo.GetTokenCreationTime(token) + if err != nil { + return err + } + if (creationTime + time.MicroSecondsInOneMinute*AdminTokenValidityInMinutes) < time.Microseconds() { + err = ente.NewBadRequestError(&ente.ApiErrorParams{ + Message: "Token is too old", + }) + return err + } + return nil +} + +func (h *AdminHandler) DisableTwoFactor(c *gin.Context) { + err := h.isFreshAdminToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + var request ente.DisableTwoFactorRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Bad request")) + return + } + + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) disabling 2FA for account %d", auth.GetUserID(c.Request.Header), request.UserID)) + logger := logrus.WithFields(logrus.Fields{ + "user_id": request.UserID, + "admin_id": auth.GetUserID(c.Request.Header), + "req_id": requestid.Get(c), + "req_ctx": "disable_2fa", + }) + logger.Info("Initiate disable 2FA") + err = h.UserController.DisableTwoFactor(request.UserID) + if err != nil { + logger.WithError(err).Error("Failed to disable 2FA") + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + logger.Info("2FA successfully disabled") + c.JSON(http.StatusOK, gin.H{}) +} + +// RemovePasskeys is an admin API request to disable passkey 2FA for a user account by removing its passkeys. +// This is used when we get a user request to reset their passkeys 2FA when they might've lost access to their devices or synced stores. We verify their identity out of band. +// BY DEFAULT, IF THE USER HAS TOTP BASED 2FA ENABLED, REMOVING PASSKEYS WILL NOT DISABLE TOTP 2FA. 
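+// The request body is the same ente.AdminOpsForUserRequest used by the other admin ops in this file, carrying just the target user's ID.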
+func (h *AdminHandler) RemovePasskeys(c *gin.Context) { + var request ente.AdminOpsForUserRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Bad request")) + return + } + + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) removing passkeys for account %d", auth.GetUserID(c.Request.Header), request.UserID)) + logger := logrus.WithFields(logrus.Fields{ + "user_id": request.UserID, + "admin_id": auth.GetUserID(c.Request.Header), + "req_id": requestid.Get(c), + "req_ctx": "remove_passkeys", + }) + logger.Info("Initiate remove passkeys") + err := h.PasskeyController.RemovePasskey2FA(request.UserID) + if err != nil { + logger.WithError(err).Error("Failed to remove passkeys") + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + logger.Info("Passkeys successfully removed") + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) CloseFamily(c *gin.Context) { + + var request ente.AdminOpsForUserRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Bad request")) + return + } + + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) closing family for account %d", auth.GetUserID(c.Request.Header), request.UserID)) + logger := logrus.WithFields(logrus.Fields{ + "user_id": request.UserID, + "admin_id": auth.GetUserID(c.Request.Header), + "req_id": requestid.Get(c), + "req_ctx": "close_family", + }) + logger.Info("Start close family") + err := h.FamilyController.CloseFamily(c, request.UserID) + if err != nil { + logger.WithError(err).Error("Failed to close family") + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + logger.Info("Finished close family") + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) UpdateSubscription(c *gin.Context) { + var r ente.UpdateSubscriptionRequest + if err := c.ShouldBindJSON(&r); err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Bad request")) + return + } + r.AdminID = auth.GetUserID(c.Request.Header) + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) updating subscription for user: %d", r.AdminID, r.UserID)) + err := h.BillingController.UpdateSubscription(r) + if err != nil { + logrus.WithError(err).Error("Failed to update subscription") + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + logrus.Info("Updated subscription") + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) UpdateBFDeal(c *gin.Context) { + var r ente.UpdateBlackFridayDeal + if err := c.ShouldBindJSON(&r); err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Bad request")) + return + } + if err := r.Validate(); err != nil { + handler.Error(c, stacktrace.Propagate(ente.NewBadRequestWithMessage(err.Error()), "Bad request")) + return + } + adminID := auth.GetUserID(c.Request.Header) + var storage, validTill int64 + if r.Testing { + storage = r.StorageInMB * 1024 * 1024 + validTill = gTime.Now().Add(gTime.Duration(r.Minute) * gTime.Minute).UnixMicro() + } else { + storage = r.StorageInGB * 1024 * 1024 * 1024 + validTill = gTime.Now().AddDate(r.Year, 0, 0).UnixMicro() + } + var err error + switch r.Action { + case ente.ADD: + err = h.StorageBonusRepo.InsertBFBonus(c, r.UserID, validTill, storage) + case ente.UPDATE: + err = h.StorageBonusRepo.UpdateBFBonus(c, r.UserID, validTill, storage) + case ente.REMOVE: + _, err = h.StorageBonusRepo.RemoveBFBonus(c, r.UserID) + } + if err != nil { + handler.Error(c, 
stacktrace.Propagate(err, "")) + return + } + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) : User %d %s", adminID, r.UserID, r.UpdateLog())) + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) RecoverAccount(c *gin.Context) { + err := h.isFreshAdminToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + var request ente.RecoverAccountRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "Bad request")) + return + } + if request.EmailID == "" || !strings.Contains(request.EmailID, "@") { + handler.Error(c, stacktrace.Propagate(errors.New("invalid email"), "Bad request")) + return + } + + go h.DiscordController.NotifyAdminAction( + fmt.Sprintf("Admin (%d) recovering account for %d", auth.GetUserID(c.Request.Header), request.UserID)) + logger := logrus.WithFields(logrus.Fields{ + "user_id": request.UserID, + "admin_id": auth.GetUserID(c.Request.Header), + "user_email": request.EmailID, + "req_id": requestid.Get(c), + "req_ctx": "account_recovery", + }) + logger.Info("Initiate account recovery") + err = h.UserController.HandleAccountRecovery(c, request) + if err != nil { + logger.WithError(err).Error("Failed to recover account") + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + logger.Info("Account successfully recovered") + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *AdminHandler) GetEmailHash(c *gin.Context) { + e := c.Query("email") + hash, err := crypto.GetHash(e, h.HashingKey) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"hash": hash}) +} + +func (h *AdminHandler) GetEmailsFromHashes(c *gin.Context) { + var request ente.GetEmailsFromHashesRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + emails, err := h.UserRepo.GetEmailsFromHashes(request.Hashes) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"emails": emails}) +} + +func (h *AdminHandler) attachSubscription(ctx *gin.Context, userID int64, response gin.H) { + subscription, err := h.BillingRepo.GetUserSubscription(userID) + if err == nil { + response["subscription"] = subscription + } + details, err := h.UserController.GetDetailsV2(ctx, userID, false, ente.Photos) + if err == nil { + response["details"] = details + } +} + +func (h *AdminHandler) ClearOrphanObjects(c *gin.Context) { + var req ente.ClearOrphanObjectsRequest + err := c.ShouldBindJSON(&req) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + if !h.ObjectCleanupController.IsValidClearOrphanObjectsDC(req.DC) { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "unsupported dc %s", req.DC)) + return + } + go h.ObjectCleanupController.ClearOrphanObjects(req.DC, req.Prefix, req.ForceTaskLock) + c.JSON(http.StatusOK, gin.H{}) +} diff --git a/server/pkg/api/authenticator.go b/server/pkg/api/authenticator.go new file mode 100644 index 000000000..6b071cf81 --- /dev/null +++ b/server/pkg/api/authenticator.go @@ -0,0 +1,111 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/ente-io/museum/ente" + model "github.com/ente-io/museum/ente/authenticator" + authenticator "github.com/ente-io/museum/pkg/controller/authenticator" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +// 
AuthenticatorHandler exposes request handlers for authenticator-related endpoints +type AuthenticatorHandler struct { + Controller *authenticator.Controller +} + +// CreateKey stores the encrypted key that protects the user's authenticator entities +func (h *AuthenticatorHandler) CreateKey(c *gin.Context) { + var request model.CreateKeyRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + err := h.Controller.CreateKey(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to create key")) + return + } + c.Status(http.StatusOK) +} + +// GetKey returns the user's authenticator key +func (h *AuthenticatorHandler) GetKey(c *gin.Context) { + resp, err := h.Controller.GetKey(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to get key")) + return + } + c.JSON(http.StatusOK, resp) +} + +// CreateEntity creates a new authenticator entity +func (h *AuthenticatorHandler) CreateEntity(c *gin.Context) { + var request model.CreateEntityRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + resp, err := h.Controller.CreateEntity(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to create entity")) + return + } + c.JSON(http.StatusOK, resp) +} + +// UpdateEntity updates an existing authenticator entity +func (h *AuthenticatorHandler) UpdateEntity(c *gin.Context) { + var request model.UpdateEntityRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + err := h.Controller.UpdateEntity(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to update entity")) + return + } + c.Status(http.StatusOK) +} + +// DeleteEntity deletes the authenticator entity with the given id +func (h *AuthenticatorHandler) DeleteEntity(c *gin.Context) { + id, err := uuid.Parse(c.Query("id")) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "failed to find id")) + return + } + _, err = h.Controller.Delete(c, id) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to delete entity")) + return + } + c.Status(http.StatusOK) +} + +// GetDiff returns the authenticator entities that have changed since the timestamp passed in the request 
+
+// GetDiff returns the authenticator entities that have changed after the given timestamp
+func (h *AuthenticatorHandler) GetDiff(c *gin.Context) {
+	var request model.GetEntityDiffRequest
+	if err := c.ShouldBindQuery(&request); err != nil {
+		handler.Error(c,
+			stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+		return
+	}
+
+	entities, err := h.Controller.GetDiff(c, request)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, "Failed to fetch authenticator entity diff"))
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"diff": entities,
+	})
+}
diff --git a/server/pkg/api/billing.go b/server/pkg/api/billing.go
new file mode 100644
index 000000000..725011fd5
--- /dev/null
+++ b/server/pkg/api/billing.go
@@ -0,0 +1,282 @@
+package api
+
+import (
+	b64 "encoding/base64"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+
+	"github.com/ente-io/museum/pkg/utils/billing"
+	"github.com/ente-io/museum/pkg/utils/network"
+	"github.com/gin-contrib/requestid"
+
+	"github.com/ente-io/stacktrace"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/awa/go-iap/appstore"
+	"github.com/awa/go-iap/playstore"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/controller"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/museum/pkg/utils/handler"
+	"github.com/gin-gonic/gin"
+)
+
+// BillingHandler exposes request handlers for all billing related requests
+type BillingHandler struct {
+	Controller          *controller.BillingController
+	AppStoreController  *controller.AppStoreController
+	PlayStoreController *controller.PlayStoreController
+	StripeController    *controller.StripeController
+}
+
+// GetPlansV2 returns the default Stripe account's subscription plans for the country the client request came from
+func (h *BillingHandler) GetPlansV2(c *gin.Context) {
+	plans := h.Controller.GetPlansV2(network.GetClientCountry(c), ente.DefaultStripeAccountCountry)
+	freePlan := billing.GetFreePlan()
+
+	log.WithFields(log.Fields{
+		"req_id":   requestid.Get(c),
+		"plans":    fmt.Sprintf("%+v", plans),
+		"freePlan": fmt.Sprintf("%+v", freePlan),
+	}).Info("Returning plans")
+
+	c.JSON(http.StatusOK, gin.H{
+		"plans":    plans,
+		"freePlan": freePlan,
+	})
+}
+
+// GetUserPlans returns the plans from the Stripe account and country that the user's existing plan is from
+func (h *BillingHandler) GetUserPlans(c *gin.Context) {
+	userID := auth.GetUserID(c.Request.Header)
+	plans, err := h.Controller.GetUserPlans(c, userID)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, "Failed to fetch user plans"))
+		return
+	}
+	freePlan := billing.GetFreePlan()
+
+	log.WithFields(log.Fields{
+		"user_id":  userID,
+		"req_id":   requestid.Get(c),
+		"plans":    fmt.Sprintf("%+v", plans),
+		"freePlan": fmt.Sprintf("%+v", freePlan),
+	}).Info("Returning user plans")
+
+	c.JSON(http.StatusOK, gin.H{
+		"plans":    plans,
+		"freePlan": freePlan,
+	})
+}
+
+// GetStripeAccountCountry returns the Stripe account country of the user's existing plan;
+// if the user doesn't have a plan, the default Stripe account country is returned
+func (h *BillingHandler) GetStripeAccountCountry(c *gin.Context) {
+	userID := auth.GetUserID(c.Request.Header)
+	stripeAccountCountry, err := h.Controller.GetStripeAccountCountry(userID)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, "Failed to get stripe account country"))
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"stripeAccountCountry": stripeAccountCountry,
+	})
+}
+
+// Deprecated:
+// GetUsage returns the storage usage for the requesting user
+func (h *BillingHandler) GetUsage(c *gin.Context) {
+	// status code to indicate that endpoint is deprecated
c.JSON(http.StatusGone, gin.H{ + "message": "This endpoint is deprecated.", + }) +} + +// GetSubscription returns the current subscription for a user if any +func (h *BillingHandler) GetSubscription(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + subscription, err := h.Controller.GetSubscription(c, userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "subscription": subscription, + }) +} + +// VerifySubscription verifies and returns the verified subscription +func (h *BillingHandler) VerifySubscription(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.SubscriptionVerificationRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + subscription, err := h.Controller.VerifySubscription(userID, + request.PaymentProvider, request.ProductID, request.VerificationData) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "subscription": subscription, + }) +} + +// AndroidNotificationHandler handles the notifications from PlayStore +func (h *BillingHandler) AndroidNotificationHandler(c *gin.Context) { + var request ente.AndroidNotification + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + decoded, err := b64.StdEncoding.DecodeString(request.Message.Data) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + log.Println("Received notification " + string(decoded)) + var notification playstore.DeveloperNotification + err = json.Unmarshal(decoded, ¬ification) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if notification.TestNotification.Version == "1.0" { + log.Info("Ignoring test notification") + } else { + err = h.PlayStoreController.HandleNotification(notification) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + } + c.JSON(http.StatusOK, gin.H{}) +} + +// IOSNotificationHandler handles the notifications from AppStore +func (h *BillingHandler) IOSNotificationHandler(c *gin.Context) { + var notification appstore.SubscriptionNotification + if err := c.ShouldBindJSON(¬ification); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + err := h.AppStoreController.HandleNotification(c, notification) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} + +// GetCheckoutSession generates and returns stripe checkout session for subscription purchase +func (h *BillingHandler) GetCheckoutSession(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + productID := c.Query("productID") + redirectRootURL, err := h.Controller.GetRedirectURL(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + sessionID, err := h.StripeController.GetCheckoutSession(productID, userID, redirectRootURL) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"sessionID": sessionID}) +} + +// StripeINNotificationHandler handles the notifications from older StripeIN account +func (h *BillingHandler) StripeINNotificationHandler(c *gin.Context) { + notification, err := io.ReadAll(c.Request.Body) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + stripeSignature := c.GetHeader(ente.StripeSignature) + err = 
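The Android webhook above unwraps a Google Play Real-time Developer Notification from its Pub/Sub push envelope: the interesting payload arrives base64-encoded in message.data, and only then gets unmarshalled into playstore.DeveloperNotification. A standalone sketch of just that decode step, using a made-up test payload:

package main

import (
	b64 "encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/awa/go-iap/playstore"
)

func main() {
	// base64 of {"version":"1.0"} -- a fabricated payload purely for illustration;
	// real envelopes carry packageName, eventTimeMillis, and a subscription or
	// test notification.
	data := "eyJ2ZXJzaW9uIjoiMS4wIn0="
	decoded, err := b64.StdEncoding.DecodeString(data)
	if err != nil {
		panic(err)
	}
	var notification playstore.DeveloperNotification
	if err := json.Unmarshal(decoded, &notification); err != nil {
		panic(err)
	}
	fmt.Println("version:", notification.Version)
}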
h.StripeController.HandleINNotification(notification, stripeSignature) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} + +// StripeUSNotificationHandler handles the notifications from new StripeUS account +func (h *BillingHandler) StripeUSNotificationHandler(c *gin.Context) { + notification, err := io.ReadAll(c.Request.Body) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + stripeSignature := c.GetHeader(ente.StripeSignature) + err = h.StripeController.HandleUSNotification(notification, stripeSignature) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} + +// StripeUpdateSubscription handles stripe subscription updates requests +func (h *BillingHandler) StripeUpdateSubscription(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.StripeUpdateRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + s, err := h.StripeController.UpdateSubscription(request.ProductID, userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"result": s}) +} + +// StripeCancelSubscription handles stripe subscription cancel requests +func (h *BillingHandler) StripeCancelSubscription(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + subscription, err := h.StripeController.UpdateSubscriptionCancellationStatus(userID, true) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"subscription": subscription}) +} + +// StripeActivateSubscription handles stripe subscription activation requests +func (h *BillingHandler) StripeActivateSubscription(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + subscription, err := h.StripeController.UpdateSubscriptionCancellationStatus(userID, false) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"subscription": subscription}) +} + +// GetStripeCustomerPortal handles stripe customer portal url retrieval request +func (h *BillingHandler) GetStripeCustomerPortal(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + redirectRootURL, err := h.Controller.GetRedirectURL(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + url, err := h.StripeController.GetStripeCustomerPortal(userID, redirectRootURL) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"url": url}) +} diff --git a/server/pkg/api/cast.go b/server/pkg/api/cast.go new file mode 100644 index 000000000..62d5c9478 --- /dev/null +++ b/server/pkg/api/cast.go @@ -0,0 +1,144 @@ +package api + +import ( + entity "github.com/ente-io/museum/ente/cast" + "github.com/ente-io/museum/pkg/controller/cast" + "net/http" + "strconv" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// CastHandler exposes request handlers for publicly accessible collections +type CastHandler struct { + FileCtrl *controller.FileController + CollectionCtrl *controller.CollectionController + Ctrl *cast.Controller +} + +func (h *CastHandler) RegisterDevice(c *gin.Context) { + var request entity.RegisterDeviceRequest + if err := 
c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to bind")) + return + } + code, err := h.Ctrl.RegisterDevice(c, &request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to register device")) + return + } + c.JSON(http.StatusOK, gin.H{ + "deviceCode": code, + }) +} + +func (h *CastHandler) GetDeviceInfo(c *gin.Context) { + deviceCode := getDeviceCode(c) + publicKey, err := h.Ctrl.GetPublicKey(c, deviceCode) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to get public key")) + return + } + c.JSON(http.StatusOK, gin.H{ + "publicKey": publicKey, + }) +} + +func (h *CastHandler) InsertCastData(c *gin.Context) { + var request entity.CastRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to bind")) + return + } + err := h.Ctrl.InsertCastData(c, &request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to start cast")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} + +// RevokeAllToken disable all active cast token for a user +func (h *CastHandler) RevokeAllToken(c *gin.Context) { + err := h.Ctrl.RevokeAllToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to start cast")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *CastHandler) GetCastData(c *gin.Context) { + deviceCode := getDeviceCode(c) + encCastData, err := h.Ctrl.GetEncCastData(c, deviceCode) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to get encrypted payload")) + return + } + c.JSON(http.StatusOK, gin.H{ + "encCastData": encCastData, + }) + +} + +// GetFile redirects the request to the file location +func (h *CastHandler) GetFile(c *gin.Context) { + h.getFileForType(c, ente.FILE) +} + +// GetThumbnail redirects the request to the file's thumbnail location +func (h *CastHandler) GetThumbnail(c *gin.Context) { + h.getFileForType(c, ente.THUMBNAIL) +} + +// GetCollection redirects the request to the collection location +func (h *CastHandler) GetCollection(c *gin.Context) { + collection, err := h.CollectionCtrl.GetCastCollection(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "collection": collection, + }) +} + +// GetDiff returns the diff within a collection since a timestamp +func (h *CastHandler) GetDiff(c *gin.Context) { + sinceTime, err := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + files, hasMore, err := h.CollectionCtrl.GetCastDiff(c, sinceTime) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "diff": files, + "hasMore": hasMore, + }) +} + +func getDeviceCode(c *gin.Context) string { + return c.Param("deviceCode") +} + +func (h *CastHandler) getFileForType(c *gin.Context, objectType ente.ObjectType) { + fileID, err := strconv.ParseInt(c.Param("fileID"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + url, err := h.FileCtrl.GetCastFileUrl(c, fileID, objectType) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Redirect(http.StatusTemporaryRedirect, url) +} diff --git a/server/pkg/api/collection.go b/server/pkg/api/collection.go new file mode 100644 index 000000000..fff98e7f4 --- /dev/null +++ b/server/pkg/api/collection.go @@ -0,0 +1,446 @@ +package api + +import ( + "fmt" + 
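Read together, the cast handlers above form a small pairing handshake between the device that will display the slideshow (the "TV") and the phone that owns the album. The sequence below is an assumed reconstruction from the handler and entity names; the route paths are illustrative, not taken from this patch:

// 1. TV: registers itself and renders the returned code on screen.
//      POST /cast/device-info          -> {"deviceCode": "ABC123"}   (RegisterDevice)
// 2. Phone: the user types in that code, and the phone fetches the TV's
//    public key so everything it sends stays end-to-end encrypted.
//      GET  /cast/device-info/ABC123   -> {"publicKey": "..."}       (GetDeviceInfo)
// 3. Phone: uploads the cast payload encrypted to that public key.
//      POST /cast/cast-data                                          (InsertCastData)
// 4. TV: polls until the encrypted payload appears, decrypts it locally,
//    then pulls content via GetCollection, GetDiff, GetFile and GetThumbnail.
//      GET  /cast/cast-data/ABC123     -> {"encCastData": "..."}     (GetCastData)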
"net/http" + "strconv" + + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/gin-gonic/gin" +) + +// CollectionHandler exposes request handlers for all collection related requests +type CollectionHandler struct { + Controller *controller.CollectionController +} + +// Create creates a collection +func (h *CollectionHandler) Create(c *gin.Context) { + log.Info("Collection create") + var collection ente.Collection + if err := c.ShouldBindJSON(&collection); err != nil { + handler.Error(c, stacktrace.Propagate(err, "Could not bind request params")) + return + } + + collection.App = string(auth.GetApp(c)) + collection.UpdationTime = time.Microseconds() + collection, err := h.Controller.Create(collection, + auth.GetUserID(c.Request.Header)) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Could not create collection")) + return + } + c.JSON(http.StatusOK, gin.H{ + "collection": collection, + }) +} + +// GetCollectionByID returns the collection for given ID. +func (h *CollectionHandler) GetCollectionByID(c *gin.Context) { + cID, err := strconv.ParseInt(c.Param("collectionID"), 10, 64) + if err != nil { + handler.Error(c, ente.ErrBadRequest) + return + } + userID := auth.GetUserID(c.Request.Header) + collection, err := h.Controller.GetCollection(c, userID, cID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "collection": collection, + }) +} + +// Deprecated: Remove once rps goes to 0. +// Get returns the list of collections accessible to a user. 
+func (h *CollectionHandler) Get(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + sinceTime, _ := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + + app := auth.GetApp(c) + + // TODO: Compute both with a single query + ownedCollections, err := h.Controller.GetOwned(userID, sinceTime, app) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to get owned collections")) + return + } + sharedCollections, err := h.Controller.GetSharedWith(userID, sinceTime, app) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to get shared collections")) + return + } + c.JSON(http.StatusOK, gin.H{ + "collections": append(ownedCollections, sharedCollections...), + }) +} + +// GetV2 returns the list of collections accessible to a user +func (h *CollectionHandler) GetV2(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + sinceTime, _ := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + app := auth.GetApp(c) + ownedCollections, err := h.Controller.GetOwnedV2(userID, sinceTime, app) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to get owned collections")) + return + } + sharedCollections, err := h.Controller.GetSharedWith(userID, sinceTime, app) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to get shared collections")) + return + } + c.JSON(http.StatusOK, gin.H{ + "collections": append(ownedCollections, sharedCollections...), + }) +} + +// Share shares a collection with a user +func (h *CollectionHandler) Share(c *gin.Context) { + var request ente.AlterShareRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + resp, err := h.Controller.Share(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "sharees": resp, + }) +} + +// ShareURL generates a publicly sharable url +func (h *CollectionHandler) ShareURL(c *gin.Context) { + var request ente.CreatePublicAccessTokenRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + // todo:[2/Sep/23] change device limit to 0 once both web and mobile clients are updated + request.DeviceLimit = controller.DeviceLimitThreshold + response, err := h.Controller.ShareURL(c, auth.GetUserID(c.Request.Header), request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "result": response, + }) +} + +// UpdateShareURL generates a publicly sharable url +func (h *CollectionHandler) UpdateShareURL(c *gin.Context) { + var req ente.UpdatePublicAccessTokenRequest + if err := c.ShouldBindJSON(&req); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if req.DeviceLimit == nil && req.ValidTill == nil && req.DisablePassword == nil && + req.Nonce == nil && req.PassHash == nil && req.EnableDownload == nil && req.EnableCollect == nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "all parameters are missing")) + return + } + + if req.DeviceLimit != nil && (*req.DeviceLimit < 0 || *req.DeviceLimit > controller.DeviceLimitThreshold) { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("device limit: %d out of range", *req.DeviceLimit))) + return + } + + if req.ValidTill != nil && *req.ValidTill != 0 && *req.ValidTill < time.Microseconds() { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "valid till should be greater than current timestamp")) 
+ return + } + + var allPassParamsMissing = req.Nonce == nil && req.PassHash == nil && req.MemLimit == nil && req.OpsLimit == nil + var allPassParamsPresent = req.Nonce != nil && req.PassHash != nil && req.MemLimit != nil && req.OpsLimit != nil + + if !(allPassParamsMissing || allPassParamsPresent) { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "all password params should be either present or missing")) + return + } + + if allPassParamsPresent && req.DisablePassword != nil && *req.DisablePassword { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "can not set and disable password in same request")) + return + } + + response, err := h.Controller.UpdateShareURL(c, auth.GetUserID(c.Request.Header), req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "result": response, + }) +} + +// UnShareURL disable all shared urls for the given collectionID +func (h *CollectionHandler) UnShareURL(c *gin.Context) { + cID, err := strconv.ParseInt(c.Param("collectionID"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + userID := auth.GetUserID(c.Request.Header) + err = h.Controller.DisableSharedURL(c, userID, cID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// UnShare unshares a collection with a user +func (h *CollectionHandler) UnShare(c *gin.Context) { + var request ente.AlterShareRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + resp, err := h.Controller.UnShare(c, request.CollectionID, auth.GetUserID(c.Request.Header), request.Email) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "sharees": resp, + }) +} + +// Leave allows user to leave a shared collection, which is not owned by them +func (h *CollectionHandler) Leave(c *gin.Context) { + cID, err := strconv.ParseInt(c.Param("collectionID"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + err = h.Controller.Leave(c, cID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// AddFiles adds files to a collection +func (h *CollectionHandler) AddFiles(c *gin.Context) { + var request ente.AddFilesRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if len(request.Files) > DefaultMaxBatchSize { + handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, "")) + return + } + + if err := h.Controller.AddFiles(c, auth.GetUserID(c.Request.Header), request.Files, request.CollectionID); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// RestoreFiles adds files from trash to given collection +func (h *CollectionHandler) RestoreFiles(c *gin.Context) { + var request ente.AddFilesRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + if len(request.Files) > DefaultMaxBatchSize { + handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, "")) + return + } + + if err := h.Controller.RestoreFiles(c, auth.GetUserID(c.Request.Header), request.CollectionID, request.Files); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// 
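The UpdateShareURL validation above encodes an all-or-nothing rule: nonce, passHash, memLimit and opsLimit must either all be present (to set a password on the link) or all be absent, and a request may not set and disable a password at the same time. For reference, a body that passes those checks might look like this sketch; the camelCase JSON field names and the argon2id-style limits are assumptions, since the struct's tags aren't visible in this hunk, and all values are placeholders.

package main

import "fmt"

// validTill is epoch microseconds (0 = no expiry) and deviceLimit must stay
// within the server's DeviceLimitThreshold. Field names assumed camelCase.
const updateShareURLBody = `{
  "collectionID": 1234,
  "deviceLimit": 10,
  "validTill": 0,
  "nonce": "<base64 nonce>",
  "passHash": "<base64 argon2id hash>",
  "memLimit": 67108864,
  "opsLimit": 2,
  "enableDownload": true
}`

func main() { fmt.Println(updateShareURLBody) }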
+// MoveFiles moves files from one collection to another
+func (h *CollectionHandler) MoveFiles(c *gin.Context) {
+	var request ente.MoveFilesRequest
+	if err := c.ShouldBindJSON(&request); err != nil {
+		handler.Error(c,
+			stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+		return
+	}
+	if len(request.Files) > DefaultMaxBatchSize {
+		handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, ""))
+		return
+	}
+	if request.ToCollectionID == request.FromCollectionID {
+		handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "fromCollectionID and toCollectionID should be different"))
+		return
+	}
+
+	if err := h.Controller.MoveFiles(c, request); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.Status(http.StatusOK)
+}
+
+// RemoveFilesV3 allows removing files from a collection even when the files and the collection belong to two different users
+func (h *CollectionHandler) RemoveFilesV3(c *gin.Context) {
+	var request ente.RemoveFilesV3Request
+	if err := c.ShouldBindJSON(&request); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	if len(request.FileIDs) > DefaultMaxBatchSize {
+		handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, ""))
+		return
+	}
+	if err := h.Controller.RemoveFilesV3(c, request); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.Status(http.StatusOK)
+}
+
+// GetDiffV2 returns the diff within a collection since a timestamp
+func (h *CollectionHandler) GetDiffV2(c *gin.Context) {
+	userID := auth.GetUserID(c.Request.Header)
+	cID, _ := strconv.ParseInt(c.Query("collectionID"), 10, 64)
+	sinceTime, _ := strconv.ParseInt(c.Query("sinceTime"), 10, 64)
+	files, hasMore, err := h.Controller.GetDiffV2(c, cID, userID, sinceTime)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"diff":    files,
+		"hasMore": hasMore,
+	})
+}
+
+// GetFile returns the file entry for the given collectionID and fileID
+func (h *CollectionHandler) GetFile(c *gin.Context) {
+	cID, _ := strconv.ParseInt(c.Query("collectionID"), 10, 64)
+	fileID, _ := strconv.ParseInt(c.Query("fileID"), 10, 64)
+	file, err := h.Controller.GetFile(c, cID, fileID)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"file": file,
+	})
+}
+
+// GetSharees returns the list of users a collection has been shared with
+func (h *CollectionHandler) GetSharees(c *gin.Context) {
+	userID := auth.GetUserID(c.Request.Header)
+	cID, _ := strconv.ParseInt(c.Query("collectionID"), 10, 64)
+	sharees, err := h.Controller.GetSharees(c, cID, userID)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, gin.H{
+		"sharees": sharees,
+	})
+}
+
+// Trash deletes a given collection and moves files exclusive to the collection to trash
+func (h *CollectionHandler) Trash(c *gin.Context) {
+	cID, _ := strconv.ParseInt(c.Param("collectionID"), 10, 64)
+	userID := auth.GetUserID(c.Request.Header)
+	err := h.Controller.Trash(c, userID, cID)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.Status(http.StatusOK)
+}
+
+func (h *CollectionHandler) TrashV3(c *gin.Context) {
+	var req ente.TrashCollectionV3Request
+	if err := c.ShouldBindQuery(&req); err != nil {
+		handler.Error(c,
+			stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+		return
+	}
+
+	err := h.Controller.TrashV3(c, req)
+	if err != nil {
handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// Rename updates the collection's name +func (h *CollectionHandler) Rename(c *gin.Context) { + var request ente.RenameRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if err := h.Controller.Rename(auth.GetUserID(c.Request.Header), request.CollectionID, request.EncryptedName, request.NameDecryptionNonce); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// Updates the magic metadata for a collection +func (h *CollectionHandler) PrivateMagicMetadataUpdate(c *gin.Context) { + var request ente.UpdateCollectionMagicMetadata + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if err := h.Controller.UpdateMagicMetadata(c, request, false); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// PublicMagicMetadataUpdate updates the public magic metadata for a collection +func (h *CollectionHandler) PublicMagicMetadataUpdate(c *gin.Context) { + var request ente.UpdateCollectionMagicMetadata + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if err := h.Controller.UpdateMagicMetadata(c, request, true); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// ShareeMagicMetadataUpdate updates sharees magic metadata for shared collection. +func (h *CollectionHandler) ShareeMagicMetadataUpdate(c *gin.Context) { + var request ente.UpdateCollectionMagicMetadata + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.Controller.UpdateShareeMagicMetadata(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} diff --git a/server/pkg/api/embedding.go b/server/pkg/api/embedding.go new file mode 100644 index 000000000..983bed52c --- /dev/null +++ b/server/pkg/api/embedding.go @@ -0,0 +1,61 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller/embedding" + "github.com/ente-io/museum/pkg/utils/handler" + + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +type EmbeddingHandler struct { + Controller *embedding.Controller +} + +// InsertOrUpdate handler for inserting or updating embedding +func (h *EmbeddingHandler) InsertOrUpdate(c *gin.Context) { + var request ente.InsertOrUpdateEmbeddingRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + embedding, err := h.Controller.InsertOrUpdate(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, embedding) +} + +// GetDiff handler for getting diff of embedding +func (h *EmbeddingHandler) GetDiff(c *gin.Context) { + var request ente.GetEmbeddingDiffRequest + if err := c.ShouldBindQuery(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + embeddings, err := h.Controller.GetDiff(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, 
gin.H{ + "diff": embeddings, + }) +} + +// DeleteAll handler for deleting all embeddings for the user +func (h *EmbeddingHandler) DeleteAll(c *gin.Context) { + err := h.Controller.DeleteAll(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} diff --git a/server/pkg/api/family.go b/server/pkg/api/family.go new file mode 100644 index 000000000..c37b3dc73 --- /dev/null +++ b/server/pkg/api/family.go @@ -0,0 +1,132 @@ +package api + +import ( + "net/http" + + "github.com/ente-io/museum/pkg/controller/family" + "github.com/ente-io/stacktrace" + "github.com/google/uuid" + + // "github.com/gin-contrib/requestid" + // log "github.com/sirupsen/logrus" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + + // "github.com/ente-io/museum/pkg/utils/time" + "github.com/gin-gonic/gin" +) + +// FamilyHandler contains handlers for managing family plans +type FamilyHandler struct { + Controller *family.Controller +} + +// CreateFamily creates a family with current user as admin member +func (h *FamilyHandler) CreateFamily(c *gin.Context) { + err := h.Controller.CreateFamily(c, auth.GetUserID(c.Request.Header)) + + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// InviteMember sends out invitation to a user for joining acting user's family plan +func (h *FamilyHandler) InviteMember(c *gin.Context) { + var request ente.InviteMemberRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "Could not bind request params")) + return + } + err := h.Controller.InviteMember(c, auth.GetUserID(c.Request.Header), request.Email) + + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// FetchMembers returns information about members who have been invited (only for admin) or are part of family plan +func (h *FamilyHandler) FetchMembers(c *gin.Context) { + members, err := h.Controller.FetchMembers(c, auth.GetUserID(c.Request.Header)) + + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, members) +} + +// RemoveMember removes the member from the family group +func (h *FamilyHandler) RemoveMember(c *gin.Context) { + familyMembershipID, err := uuid.Parse(c.Param("id")) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "failed to find valid uuid")) + return + } + err = h.Controller.RemoveMember(c, auth.GetUserID(c.Request.Header), familyMembershipID) + + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// Leave family +func (h *FamilyHandler) Leave(c *gin.Context) { + err := h.Controller.LeaveFamily(c, auth.GetUserID(c.Request.Header)) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// RevokeInvite removes the invite for given user as long it's still in invite state +func (h *FamilyHandler) RevokeInvite(c *gin.Context) { + familyMembershipID, err := uuid.Parse(c.Param("id")) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "failed to find valid uuid")) + return + } + + err = h.Controller.RevokeInvite(c, auth.GetUserID(c.Request.Header), familyMembershipID) + + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// AcceptInvite 
allows a user to join the family using the invite token
+func (h *FamilyHandler) AcceptInvite(c *gin.Context) {
+	var request ente.AcceptInviteRequest
+	if err := c.ShouldBindJSON(&request); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, "Could not bind request params"))
+		return
+	}
+
+	response, err := h.Controller.AcceptInvite(c, request.Token)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, response)
+}
+
+// GetInviteInfo returns basic information about the inviter/admin as long as the invite is valid
+func (h *FamilyHandler) GetInviteInfo(c *gin.Context) {
+	inviteToken := c.Param("token")
+	response, err := h.Controller.InviteInfo(c, inviteToken)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, response)
+}
diff --git a/server/pkg/api/file.go b/server/pkg/api/file.go
new file mode 100644
index 000000000..a65b9e383
--- /dev/null
+++ b/server/pkg/api/file.go
@@ -0,0 +1,372 @@
+package api
+
+import (
+	"net/http"
+	"os"
+	"strconv"
+	"strings"
+
+	"github.com/ente-io/stacktrace"
+	"github.com/gin-contrib/requestid"
+	log "github.com/sirupsen/logrus"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/controller"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/museum/pkg/utils/handler"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/gin-gonic/gin"
+)
+
+// FileHandler exposes request handlers for all encrypted file related requests
+type FileHandler struct {
+	Controller *controller.FileController
+}
+
+// DefaultMaxBatchSize is the default maximum API batch size unless specified otherwise
+const DefaultMaxBatchSize = 1000
+
+// CreateOrUpdate creates an entry for a file
+func (h *FileHandler) CreateOrUpdate(c *gin.Context) {
+	userID := auth.GetUserID(c.Request.Header)
+	var file ente.File
+	if err := c.ShouldBindJSON(&file); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	file.UpdationTime = time.Microseconds()
+
+	// get an ente.App from the ?app= query parameter with a default of photos
+	enteApp := auth.GetApp(c)
+
+	if file.ID == 0 {
+		file.OwnerID = userID
+		file.IsDeleted = false
+		file, err := h.Controller.Create(c, userID, file, c.Request.UserAgent(), enteApp)
+		if err != nil {
+			handler.Error(c, stacktrace.Propagate(err, ""))
+			return
+		}
+		c.JSON(http.StatusOK, file)
+		return
+	}
+	response, err := h.Controller.Update(c, userID, file, enteApp)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, response)
+}
+
+// Update updates an already existing file
+func (h *FileHandler) Update(c *gin.Context) {
+	enteApp := auth.GetApp(c)
+
+	userID := auth.GetUserID(c.Request.Header)
+	var file ente.File
+	if err := c.ShouldBindJSON(&file); err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	file.UpdationTime = time.Microseconds()
+	if file.ID <= 0 {
+		handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "fileID should be >0"))
+		return
+	}
+	response, err := h.Controller.Update(c, userID, file, enteApp)
+	if err != nil {
+		handler.Error(c, stacktrace.Propagate(err, ""))
+		return
+	}
+	c.JSON(http.StatusOK, response)
+}
+
+// GetUploadURLs returns a set of presigned URLs to which the user can upload objects
+func (h *FileHandler) GetUploadURLs(c *gin.Context) {
+	enteApp := auth.GetApp(c)
+
+	userID := auth.GetUserID(c.Request.Header)
+	count, _ := strconv.Atoi(c.Query("count"))
+	urls, err := h.Controller.GetUploadURLs(c,
userID, count, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "urls": urls, + }) +} + +// GetMultipartUploadURLs returns an array of PartUpload PresignedURLs +func (h *FileHandler) GetMultipartUploadURLs(c *gin.Context) { + enteApp := auth.GetApp(c) + + userID := auth.GetUserID(c.Request.Header) + count, _ := strconv.Atoi(c.Query("count")) + urls, err := h.Controller.GetMultipartUploadURLs(c, userID, count, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "urls": urls, + }) +} + +// Get redirects the request to the file location +func (h *FileHandler) Get(c *gin.Context) { + userID, fileID := getUserAndFileIDs(c) + url, err := h.Controller.GetFileURL(userID, fileID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + h.logBadRedirect(c) + c.Redirect(http.StatusTemporaryRedirect, url) +} + +// GetV2 returns the URL of the file to client +func (h *FileHandler) GetV2(c *gin.Context) { + userID, fileID := getUserAndFileIDs(c) + url, err := h.Controller.GetFileURL(userID, fileID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "url": url, + }) +} + +// GetThumbnail redirects the request to the file's thumbnail location +func (h *FileHandler) GetThumbnail(c *gin.Context) { + userID, fileID := getUserAndFileIDs(c) + url, err := h.Controller.GetThumbnailURL(userID, fileID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + h.logBadRedirect(c) + c.Redirect(http.StatusTemporaryRedirect, url) +} + +// GetThumbnailV2 returns the URL of the thumbnail to the client +func (h *FileHandler) GetThumbnailV2(c *gin.Context) { + userID, fileID := getUserAndFileIDs(c) + url, err := h.Controller.GetThumbnailURL(userID, fileID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "url": url, + }) +} + +// Trash moves the given files to the trash bin +func (h *FileHandler) Trash(c *gin.Context) { + var request ente.TrashRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to bind")) + return + } + if len(request.TrashItems) > DefaultMaxBatchSize { + handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, "")) + return + } + userID := auth.GetUserID(c.Request.Header) + request.OwnerID = userID + err := h.Controller.Trash(c, userID, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + } else { + c.Status(http.StatusOK) + } +} + +// GetSize returns the size of files indicated by fileIDs +func (h *FileHandler) GetSize(c *gin.Context) { + var request ente.FileIDsRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + userID := auth.GetUserID(c.Request.Header) + shouldReject, err := shouldRejectRequest(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if shouldReject { + c.Status(http.StatusUpgradeRequired) + return + } + + size, err := h.Controller.GetSize(userID, request.FileIDs) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + } else { + c.JSON(http.StatusOK, gin.H{ + "size": size, + }) + } +} + +// GetInfo returns the FileInfo of files indicated by fileIDs +func (h *FileHandler) GetInfo(c *gin.Context) { + var request ente.FileIDsRequest + if err := 
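Uploads in museum are indirect: GetUploadURLs and GetMultipartUploadURLs above hand out presigned object-storage URLs, and the client then PUTs the already-encrypted bytes straight to the bucket. A hedged client-side sketch follows; the /files/upload-urls route and the objectKey/url response fields are assumptions based only on the handler names and the {"urls": [...]} shape shown above.

package example

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// uploadBlob asks for one presigned URL and PUTs an encrypted blob to it.
func uploadBlob(baseURL, token string, blob []byte) (string, error) {
	req, err := http.NewRequest(http.MethodGet, baseURL+"/files/upload-urls?count=1", nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("X-Auth-Token", token)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()
	var body struct {
		URLs []struct {
			ObjectKey string `json:"objectKey"`
			URL       string `json:"url"`
		} `json:"urls"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		return "", err
	}
	if len(body.URLs) == 0 {
		return "", fmt.Errorf("no upload urls returned")
	}
	// The server never sees plaintext; blob is encrypted client-side.
	put, err := http.NewRequest(http.MethodPut, body.URLs[0].URL, bytes.NewReader(blob))
	if err != nil {
		return "", err
	}
	if _, err := http.DefaultClient.Do(put); err != nil {
		return "", err
	}
	return body.URLs[0].ObjectKey, nil
}

The returned objectKey would then be referenced from the subsequent CreateOrUpdate file entry.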
c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to bind request")) + return + } + userID := auth.GetUserID(c.Request.Header) + + response, err := h.Controller.GetFileInfo(c, userID, request.FileIDs) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + } else { + c.JSON(http.StatusOK, response) + } +} + +// shouldRejectRequest return true if the client which is making the request +// is Android client with version less than 0.5.36 +func shouldRejectRequest(c *gin.Context) (bool, error) { + userAgent := c.GetHeader("User-Agent") + clientVersion := c.GetHeader("X-Client-Version") + clientPkg := c.GetHeader("X-Client-Package") + + if !strings.Contains(strings.ToLower(userAgent), "android") { + return false, nil + } + + if clientPkg == "io.ente.photos.fdroid" { + return false, nil + } + + versionSplit := strings.Split(clientVersion, ".") + + if len(versionSplit) != 3 { + return false, nil + } + if versionSplit[0] != "0" { + return false, nil + } + minorVersion, err := strconv.Atoi(versionSplit[1]) + if err != nil { + // avoid reject when parsing fails + return false, nil + } + patchVersion, err := strconv.Atoi(versionSplit[2]) + if err != nil { + // avoid reject when parsing fails + return false, nil + } + shouldReject := minorVersion <= 5 && patchVersion <= 35 + if shouldReject { + log.Warnf("request rejected from older client with version %s", clientVersion) + } + return shouldReject, nil +} + +// GetDuplicates returns the list of files of the same size +func (h *FileHandler) GetDuplicates(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + dupes, err := h.Controller.GetDuplicates(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "duplicates": dupes, + }) +} + +// GetLargeThumbnail returns the list of files whose thumbnail size is larger than threshold size +func (h *FileHandler) GetLargeThumbnailFiles(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + threshold, _ := strconv.ParseInt(c.Query("threshold"), 10, 64) + largeThumbnailFiles, err := h.Controller.GetLargeThumbnailFiles(userID, threshold) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "largeThumbnailFiles": largeThumbnailFiles, + }) +} + +// UpdateMagicMetadata updates magic metadata for a list of files. +func (h *FileHandler) UpdateMagicMetadata(c *gin.Context) { + var request ente.UpdateMultipleMagicMetadataRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + if len(request.MetadataList) > DefaultMaxBatchSize { + handler.Error(c, stacktrace.Propagate(ente.ErrBatchSizeTooLarge, "")) + return + } + err := h.Controller.UpdateMagicMetadata(c, request, false) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// UpdatePublicMagicMetadata updates public magic metadata for a list of files. 
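shouldRejectRequest above gates a few endpoints on client version: only Android clients (excluding the F-Droid build) reporting a major version of 0 with minor at most 5 and patch at most 35 get the 426 Upgrade Required response. A test-style sketch of the cases it accepts and rejects, written for package api; the header values and route are illustrative.

package api

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"
)

func TestShouldRejectRequest(t *testing.T) {
	gin.SetMode(gin.TestMode)
	cases := []struct {
		userAgent, version, pkg string
		want                    bool
	}{
		{"io.ente.photos (Android)", "0.5.35", "io.ente.photos", true},         // old client, rejected
		{"io.ente.photos (Android)", "0.5.36", "io.ente.photos", false},        // first allowed patch
		{"io.ente.photos (Android)", "1.0.0", "io.ente.photos", false},         // major version > 0
		{"io.ente.photos (Android)", "0.5.10", "io.ente.photos.fdroid", false}, // F-Droid build exempt
		{"Mozilla/5.0 (iPhone)", "0.5.10", "io.ente.photos", false},            // not Android
	}
	for _, tc := range cases {
		w := httptest.NewRecorder()
		c, _ := gin.CreateTestContext(w)
		c.Request = httptest.NewRequest(http.MethodPost, "/files/size", nil)
		c.Request.Header.Set("User-Agent", tc.userAgent)
		c.Request.Header.Set("X-Client-Version", tc.version)
		c.Request.Header.Set("X-Client-Package", tc.pkg)
		if got, _ := shouldRejectRequest(c); got != tc.want {
			t.Errorf("%s %s: got %v, want %v", tc.userAgent, tc.version, got, tc.want)
		}
	}
}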
+func (h *FileHandler) UpdatePublicMagicMetadata(c *gin.Context) { + var request ente.UpdateMultipleMagicMetadataRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.Controller.UpdateMagicMetadata(c, request, true) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// UpdateThumbnail updates thumbnail of a file +func (h *FileHandler) UpdateThumbnail(c *gin.Context) { + enteApp := auth.GetApp(c) + + var request ente.UpdateThumbnailRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.Controller.UpdateThumbnail(c, request.FileID, request.Thumbnail, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +func (h *FileHandler) GetTotalFileCount(c *gin.Context) { + count, err := h.Controller.GetTotalFileCount() + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "count": count, + }) +} + +func getUserAndFileIDs(c *gin.Context) (int64, int64) { + fileID, _ := strconv.ParseInt(c.Param("fileID"), 10, 64) + userID := auth.GetUserID(c.Request.Header) + return userID, fileID +} + +// logBadRedirect will log the request id if we are redirecting to another url with the auth-token in header +func (h *FileHandler) logBadRedirect(c *gin.Context) { + if len(c.GetHeader("X-Auth-Token")) != 0 && os.Getenv("ENVIRONMENT") != "" { + log.WithField("req_id", requestid.Get(c)).Error("critical: sending token to another service") + } +} diff --git a/server/pkg/api/healthcheck.go b/server/pkg/api/healthcheck.go new file mode 100644 index 000000000..39f53c311 --- /dev/null +++ b/server/pkg/api/healthcheck.go @@ -0,0 +1,81 @@ +package api + +import ( + "database/sql" + "fmt" + "net/http" + "os" + "time" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/stacktrace" + "github.com/sirupsen/logrus" + "github.com/spf13/viper" + + "github.com/ente-io/museum/pkg/utils/config" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/gin-gonic/gin" +) + +type HealthCheckHandler struct { + DB *sql.DB +} + +func (h *HealthCheckHandler) Ping(c *gin.Context) { + res := 0 + err := h.DB.QueryRowContext(c, `SELECT 1`).Scan(&res) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + result := make(map[string]string) + result["message"] = "pong" + result["id"] = os.Getenv("GIT_COMMIT") + if c.Query("host") != "" { + result["host"], _ = os.Hostname() + } + c.JSON(http.StatusOK, result) +} + +func (h *HealthCheckHandler) PingDBStats(c *gin.Context) { + host, _ := os.Hostname() + stats := h.DB.Stats() + logrus.WithFields(logrus.Fields{ + "MaxOpenConnections": stats.MaxOpenConnections, + "Idle": stats.Idle, + "InUse": stats.InUse, + "OpenConnections": stats.OpenConnections, + "WaitCount": stats.WaitCount, + "WaitDuration": stats.WaitDuration.String(), + "MaxIdleClosed": stats.MaxIdleClosed, + "MaxIdleTimeClosed": stats.MaxIdleTimeClosed, + "MaxLifetimeClosed": stats.MaxLifetimeClosed, + }).Info("DB STATS") + + logrus.Info("DB Ping Start") + err := h.DB.Ping() + if err != nil { + logrus.WithError(err).Error("DB Ping failed") + handler.Error(c, stacktrace.Propagate(ente.NewInternalError(fmt.Sprintf("DB ping failed on %s", host)), "")) + return + } + c.Status(http.StatusOK) +} + +func (h *HealthCheckHandler) PerformHealthCheck() { + 
logrus.Info("Performing HC"); + healthCheckURL := viper.GetString("internal.health-check-url") + if healthCheckURL == "" { + if !config.IsLocalEnvironment() { + logrus.Error("Could not obtain health check URL in non-local environment") + } + return + } + var client = &http.Client{ + Timeout: 10 * time.Second, + } + _, err := client.Head(healthCheckURL) + if err != nil { + logrus.Error("Error performing health check", err) + } +} diff --git a/server/pkg/api/kex.go b/server/pkg/api/kex.go new file mode 100644 index 000000000..b4c13e2c8 --- /dev/null +++ b/server/pkg/api/kex.go @@ -0,0 +1,53 @@ +package api + +import ( + "errors" + "net/http" + + "github.com/ente-io/museum/ente" + kexCtrl "github.com/ente-io/museum/pkg/controller/kex" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +type KexHandler struct { + Controller *kexCtrl.Controller +} + +func (h *KexHandler) AddKey(c *gin.Context) { + req := ente.AddWrappedKeyRequest{} + if err := c.ShouldBindJSON(&req); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + identifier, err := h.Controller.AddKey(req.WrappedKey, req.CustomIdentifier) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{ + "identifier": identifier, + }) +} + +func (h *KexHandler) GetKey(c *gin.Context) { + identifier := c.Query("identifier") + + if identifier == "" { + handler.Error(c, stacktrace.Propagate(errors.New("identifier is required"), "")) + return + } + + wrappedKey, err := h.Controller.GetKey(identifier) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{ + "wrappedKey": wrappedKey, + }) +} diff --git a/server/pkg/api/locationtag.go b/server/pkg/api/locationtag.go new file mode 100644 index 000000000..ad61b4778 --- /dev/null +++ b/server/pkg/api/locationtag.go @@ -0,0 +1,88 @@ +package api + +import ( + "fmt" + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller/locationtag" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// LocationTagHandler expose request handlers to all location tag requests +type LocationTagHandler struct { + Controller *locationtag.Controller +} + +// Create handler for creating a new location tag +func (h *LocationTagHandler) Create(c *gin.Context) { + var request ente.LocationTag + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + request.OwnerID = auth.GetUserID(c.Request.Header) + resp, err := h.Controller.Create(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to create locationTag")) + return + } + c.JSON(http.StatusOK, resp) +} + +// Update handler for updating location tag +func (h *LocationTagHandler) Update(c *gin.Context) { + var request ente.LocationTag + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + request.OwnerID = auth.GetUserID(c.Request.Header) + resp, err := h.Controller.Update(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to update locationTag")) + return + } + c.JSON(http.StatusOK, gin.H{"locationTag": resp}) +} + +// Delete handler 
for deleting location tag +func (h *LocationTagHandler) Delete(c *gin.Context) { + var request ente.DeleteLocationTagRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + request.OwnerID = auth.GetUserID(c.Request.Header) + _, err := h.Controller.Delete(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to delete locationTag")) + return + } + c.Status(http.StatusOK) +} + +// GetDiff handler for fetching diff of location tag changes +func (h *LocationTagHandler) GetDiff(c *gin.Context) { + var request ente.GetLocationTagDiffRequest + if err := c.ShouldBindQuery(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + request.OwnerID = auth.GetUserID(c.Request.Header) + locationTags, err := h.Controller.GetDiff(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "Failed to fetch locationTag diff")) + return + } + c.JSON(http.StatusOK, gin.H{ + "diff": locationTags, + }) +} diff --git a/server/pkg/api/offer.go b/server/pkg/api/offer.go new file mode 100644 index 000000000..b4196f022 --- /dev/null +++ b/server/pkg/api/offer.go @@ -0,0 +1,21 @@ +package api + +import ( + "net/http" + + "github.com/ente-io/museum/pkg/controller/offer" + "github.com/gin-gonic/gin" +) + +// OfferHandler expose request handlers to all offer related requests +type OfferHandler struct { + Controller *offer.OfferController +} + +// Deprecated for now +func (h *OfferHandler) GetBlackFridayOffers(c *gin.Context) { + // Return an empty list until the next sale + c.JSON(http.StatusOK, gin.H{ + "offers": []interface{}{}, + }) +} diff --git a/server/pkg/api/passkeys.go b/server/pkg/api/passkeys.go new file mode 100644 index 000000000..1e6936119 --- /dev/null +++ b/server/pkg/api/passkeys.go @@ -0,0 +1,89 @@ +package api + +import ( + "net/http" + + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +type PasskeyHandler struct { + Controller *controller.PasskeyController +} + +func (h *PasskeyHandler) GetPasskeys(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + + passkeys, err := h.Controller.GetPasskeys(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{ + "passkeys": passkeys, + }) +} + +func (h *PasskeyHandler) RenamePasskey(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + + passkeyID := uuid.MustParse(c.Param("passkeyID")) + newName := c.Query("friendlyName") + + err := h.Controller.RenamePasskey(userID, passkeyID, newName) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *PasskeyHandler) DeletePasskey(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + + passkeyID := uuid.MustParse(c.Param("passkeyID")) + + err := h.Controller.DeletePasskey(userID, passkeyID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} + +func (h *PasskeyHandler) BeginRegistration(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + + options, _, sessionID, err := h.Controller.BeginRegistration(userID) + if err != nil { + 
handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{ + "options": options, + "sessionID": sessionID, + }) +} + +func (h *PasskeyHandler) FinishRegistration(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + + friendlyName := c.Query("friendlyName") + sessionID := uuid.MustParse(c.Query("sessionID")) + + err := h.Controller.FinishRegistration(userID, friendlyName, c.Request, sessionID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + c.JSON(http.StatusOK, gin.H{}) +} diff --git a/server/pkg/api/public_collection.go b/server/pkg/api/public_collection.go new file mode 100644 index 000000000..7a38f4380 --- /dev/null +++ b/server/pkg/api/public_collection.go @@ -0,0 +1,169 @@ +package api + +import ( + "net/http" + "strconv" + + "github.com/ente-io/museum/pkg/controller/storagebonus" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// PublicCollectionHandler exposes request handlers for publicly accessible collections +type PublicCollectionHandler struct { + Controller *controller.PublicCollectionController + FileCtrl *controller.FileController + CollectionCtrl *controller.CollectionController + StorageBonusController *storagebonus.Controller +} + +// GetThumbnail redirects the request to the file's thumbnail location +func (h *PublicCollectionHandler) GetThumbnail(c *gin.Context) { + h.getFileForType(c, ente.THUMBNAIL) +} + +// GetFile redirects the request to the file location +func (h *PublicCollectionHandler) GetFile(c *gin.Context) { + h.getFileForType(c, ente.FILE) +} + +// GetCollection redirects the request to the collection location +func (h *PublicCollectionHandler) GetCollection(c *gin.Context) { + collection, err := h.Controller.GetPublicCollection(c, false) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + referralCode, _ := h.StorageBonusController.GetOrCreateReferralCode(c, collection.Owner.ID) + c.JSON(http.StatusOK, gin.H{ + "collection": collection, + "referralCode": referralCode, + }) +} + +// GetUploadUrls returns upload Urls where files can be uploaded +func (h *PublicCollectionHandler) GetUploadUrls(c *gin.Context) { + enteApp := auth.GetApp(c) + + collection, err := h.Controller.GetPublicCollection(c, true) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + userID := collection.Owner.ID + count, _ := strconv.Atoi(c.Query("count")) + urls, err := h.FileCtrl.GetUploadURLs(c, userID, count, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "urls": urls, + }) +} + +// GetMultipartUploadURLs returns upload Urls where files can be uploaded +func (h *PublicCollectionHandler) GetMultipartUploadURLs(c *gin.Context) { + enteApp := auth.GetApp(c) + + collection, err := h.Controller.GetPublicCollection(c, true) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + userID := collection.Owner.ID + count, _ := strconv.Atoi(c.Query("count")) + urls, err := h.FileCtrl.GetMultipartUploadURLs(c, userID, count, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "urls": urls, + }) +} + +// CreateFile create a new file inside the collection corresponding to the public 
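The two-step registration above mirrors the standard WebAuthn ceremony: BeginRegistration mints server-side challenge options plus a session, the browser creates the credential, and FinishRegistration verifies the attestation. A sketch of the assumed round trips; the route paths are illustrative, and only the response shapes come from the handlers above:

// 1. App: GET /passkeys/registration/begin
//      -> {"options": <PublicKeyCredentialCreationOptions>, "sessionID": "<uuid>"}
//    (BeginRegistration; the sessionID ties the challenge to this attempt)
// 2. Browser: navigator.credentials.create({publicKey: options.publicKey})
// 3. App: POST /passkeys/registration/finish?friendlyName=Laptop&sessionID=<uuid>
//      with the attestation response as the request body; FinishRegistration
//      hands c.Request to the WebAuthn library to verify it against the
//      stored session before persisting the new passkey.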
accessToken +func (h *PublicCollectionHandler) CreateFile(c *gin.Context) { + var file ente.File + if err := c.ShouldBindJSON(&file); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + + enteApp := auth.GetApp(c) + + fileRes, err := h.Controller.CreateFile(c, file, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, fileRes) +} + +// VerifyPassword verifies the password for given public access token and return signed jwt token if it's valid +func (h *PublicCollectionHandler) VerifyPassword(c *gin.Context) { + var req ente.VerifyPasswordRequest + if err := c.ShouldBindJSON(&req); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + resp, err := h.Controller.VerifyPassword(c, req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, resp) +} + +// ReportAbuse captures abuse report for a public collection +func (h *PublicCollectionHandler) ReportAbuse(c *gin.Context) { + var req ente.AbuseReportRequest + if err := c.ShouldBindJSON(&req); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.Controller.ReportAbuse(c, req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// GetDiff returns the diff within a collection since a timestamp +func (h *PublicCollectionHandler) GetDiff(c *gin.Context) { + sinceTime, err := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + files, hasMore, err := h.CollectionCtrl.GetPublicDiff(c, sinceTime) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "diff": files, + "hasMore": hasMore, + }) +} + +func (h *PublicCollectionHandler) getFileForType(c *gin.Context, objectType ente.ObjectType) { + fileID, err := strconv.ParseInt(c.Param("fileID"), 10, 64) + if err != nil { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "")) + return + } + url, err := h.FileCtrl.GetPublicFileURL(c, fileID, objectType) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Redirect(http.StatusTemporaryRedirect, url) +} diff --git a/server/pkg/api/push.go b/server/pkg/api/push.go new file mode 100644 index 000000000..586bee56e --- /dev/null +++ b/server/pkg/api/push.go @@ -0,0 +1,32 @@ +package api + +import ( + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// PushHandler exposes request handlers for all push related requests +type PushHandler struct { + PushController *controller.PushController +} + +func (h *PushHandler) AddToken(c *gin.Context) { + var req ente.PushTokenRequest + err := c.ShouldBindJSON(&req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err = h.PushController.AddToken(auth.GetUserID(c.Request.Header), req) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} diff --git a/server/pkg/api/remotestore.go b/server/pkg/api/remotestore.go new file mode 100644 index 000000000..ea6e621a3 --- /dev/null +++ b/server/pkg/api/remotestore.go @@ -0,0 +1,51 @@ +package api + +import ( + "fmt" + "net/http" + + 
"github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller/remotestore" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// RemoteStoreHandler expose request handlers to all remote store +type RemoteStoreHandler struct { + Controller *remotestore.Controller +} + +// InsertOrUpdate handler for inserting or updating key +func (h *RemoteStoreHandler) InsertOrUpdate(c *gin.Context) { + var request ente.UpdateKeyValueRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + + err := h.Controller.InsertOrUpdate(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to update key's value")) + return + } + c.Status(http.StatusOK) +} + +// GetKey handler for fetching a value for particular key +func (h *RemoteStoreHandler) GetKey(c *gin.Context) { + var request ente.GetValueRequest + if err := c.ShouldBindQuery(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + + resp, err := h.Controller.Get(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "failed to get key value")) + return + } + c.JSON(http.StatusOK, resp) +} diff --git a/server/pkg/api/storage_bonus.go b/server/pkg/api/storage_bonus.go new file mode 100644 index 000000000..4f2ba1046 --- /dev/null +++ b/server/pkg/api/storage_bonus.go @@ -0,0 +1,49 @@ +package api + +import ( + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller/storagebonus" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +type StorageBonusHandler struct { + Controller *storagebonus.Controller +} + +func (h StorageBonusHandler) GetReferralView(context *gin.Context) { + response, err := h.Controller.GetUserReferralView(context) + if err != nil { + handler.Error(context, stacktrace.Propagate(err, "")) + return + } + context.JSON(http.StatusOK, response) +} + +func (h StorageBonusHandler) GetStorageBonusDetails(context *gin.Context) { + response, err := h.Controller.GetStorageBonusDetailResponse(context, auth.GetUserID(context.Request.Header)) + if err != nil { + handler.Error(context, stacktrace.Propagate(err, "")) + return + } + context.JSON(http.StatusOK, response) +} + +func (h StorageBonusHandler) ClaimReferral(c *gin.Context) { + code := c.Query("code") + if code == "" { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "referral code is required")) + return + } + err := h.Controller.ApplyReferralCode(c, code) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) + +} diff --git a/server/pkg/api/trash.go b/server/pkg/api/trash.go new file mode 100644 index 000000000..f193100bf --- /dev/null +++ b/server/pkg/api/trash.go @@ -0,0 +1,86 @@ +package api + +import ( + "net/http" + "strconv" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// TrashHandler handles endpoints related to trash/restore etc +type TrashHandler struct { + Controller *controller.TrashController +} + +// GetDiff returns the list of trashed 
files for the user that +// have changed sinceTime. +// Deprecated, shutdown when there's no traffic for 30 days +func (t *TrashHandler) GetDiff(c *gin.Context) { + enteApp := auth.GetApp(c) + + userID := auth.GetUserID(c.Request.Header) + sinceTime, _ := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + diff, hasMore, err := t.Controller.GetDiff(userID, sinceTime, false, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "diff": diff, + "hasMore": hasMore, + }) +} + +func (t *TrashHandler) GetDiffV2(c *gin.Context) { + enteApp := auth.GetApp(c) + + userID := auth.GetUserID(c.Request.Header) + sinceTime, _ := strconv.ParseInt(c.Query("sinceTime"), 10, 64) + diff, hasMore, err := t.Controller.GetDiff(userID, sinceTime, true, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "diff": diff, + "hasMore": hasMore, + }) +} + +// Delete files permanently, queues up the file for deletion & free up the space based on file's object size +func (t *TrashHandler) Delete(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.DeleteTrashFilesRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + request.OwnerID = userID + err := t.Controller.Delete(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// Empty deletes eligible files from the trash +func (t *TrashHandler) Empty(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.EmptyTrashRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := t.Controller.EmptyTrash(c, userID, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} diff --git a/server/pkg/api/user.go b/server/pkg/api/user.go new file mode 100644 index 000000000..554f6654b --- /dev/null +++ b/server/pkg/api/user.go @@ -0,0 +1,591 @@ +package api + +import ( + "database/sql" + "errors" + "fmt" + "net/http" + "strconv" + "strings" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/ente/jwt" + "github.com/ente-io/museum/pkg/controller/user" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/google/uuid" +) + +// UserHandler exposes request handlers for all user related requests +type UserHandler struct { + UserController *user.UserController +} + +// SendOTT generates and sends an OTT to the provided email address +func (h *UserHandler) SendOTT(c *gin.Context) { + var request ente.SendOTTRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + email := strings.ToLower(request.Email) + if len(email) == 0 { + handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "Email id is missing")) + return + } + err := h.UserController.SendEmailOTT(c, email, request.Client, request.Purpose) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } else { + c.Status(http.StatusOK) + } +} + +// Logout removes the auth token from (instance) cache & database. 
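+//
+// A minimal wiring sketch for SendOTT and Logout (the route paths and
+// variable names here are illustrative assumptions, not part of this patch):
+//
+//	publicAPI.POST("/users/ott", userHandler.SendOTT)
+//	privateAPI.POST("/users/logout", userHandler.Logout)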
+func (h *UserHandler) Logout(c *gin.Context) { + err := h.UserController.Logout(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} + +// GetDetails returns details about the requesting user +func (h *UserHandler) GetDetails(c *gin.Context) { + details, err := h.UserController.GetDetails(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "details": details, + }) +} + +// GetDetailsV2 returns details about the requesting user +func (h *UserHandler) GetDetailsV2(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + fetchMemoryCount, _ := strconv.ParseBool(c.DefaultQuery("memoryCount", "true")) + + enteApp := auth.GetApp(c) + + details, err := h.UserController.GetDetailsV2(c, userID, fetchMemoryCount, enteApp) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, details) +} + +// SetAttributes sets the attributes for a user +func (h *UserHandler) SetAttributes(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.SetUserAttributesRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.UserController.SetAttributes(userID, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +func (h *UserHandler) UpdateEmailMFA(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.UpdateEmailMFA + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.UserController.UpdateEmailMFA(c, userID, *request.IsEnabled) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// UpdateKeys updates the user key attributes on password change +func (h *UserHandler) UpdateKeys(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.UpdateKeysRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + token := auth.GetToken(c) + err := h.UserController.UpdateKeys(c, userID, request, token) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// SetRecoveryKey sets the recovery key attributes for a user. 
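+// The attributes are expected to arrive already encrypted client-side; the
+// server only stores them.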
+func (h *UserHandler) SetRecoveryKey(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + var request ente.SetRecoveryKeyRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.UserController.SetRecoveryKey(userID, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// GetPublicKey returns the public key of a user +func (h *UserHandler) GetPublicKey(c *gin.Context) { + email := strings.ToLower(c.Query("email")) + publicKey, err := h.UserController.GetPublicKey(email) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "publicKey": publicKey, + }) +} + +// GetRoadmapURL redirects the user to the feedback page +func (h *UserHandler) GetRoadmapURL(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + redirectURL, err := h.UserController.GetRoadmapURL(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Redirect(http.StatusTemporaryRedirect, redirectURL) +} + +// GetRoadmapURLV2 returns the jwt token attached redirect url to roadmap +func (h *UserHandler) GetRoadmapURLV2(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + roadmapURL, err := h.UserController.GetRoadmapURL(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "url": roadmapURL, + }) +} + +// GetSessionValidityV2 verifies the user's session token and returns if the user has set their keys or not +func (h *UserHandler) GetSessionValidityV2(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + _, err := h.UserController.GetAttributes(userID) + if err == nil { + c.JSON(http.StatusOK, gin.H{ + "hasSetKeys": true, + }) + } else { + if errors.Is(err, sql.ErrNoRows) { + c.JSON(http.StatusOK, gin.H{ + "hasSetKeys": false, + }) + } else { + handler.Error(c, stacktrace.Propagate(err, "")) + } + } +} + +// VerifyEmail validates that the OTT provided in the request is valid for the +// provided email address and if yes returns the users credentials +func (h *UserHandler) VerifyEmail(c *gin.Context) { + var request ente.EmailVerificationRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + response, err := h.UserController.VerifyEmail(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, response) +} + +// ChangeEmail validates that the OTT provided in the request is valid for the +// provided email address and if yes updates the user's existing email address +func (h *UserHandler) ChangeEmail(c *gin.Context) { + var request ente.EmailVerificationRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + err := h.UserController.ChangeEmail(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// GetTwoFactorStatus returns a user's two factor status +func (h *UserHandler) GetTwoFactorStatus(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + status, err := h.UserController.GetTwoFactorStatus(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"status": status}) +} + +// SetupTwoFactor generates a two factor secret and sends it to user to 
set up their authenticator app with
+func (h *UserHandler) SetupTwoFactor(c *gin.Context) {
+ userID := auth.GetUserID(c.Request.Header)
+ response, err := h.UserController.SetupTwoFactor(userID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ c.JSON(http.StatusOK, response)
+}
+
+// EnableTwoFactor handles the two factor activation request after the user has set up two factor, by validating a TOTP request
+func (h *UserHandler) EnableTwoFactor(c *gin.Context) {
+ userID := auth.GetUserID(c.Request.Header)
+ var request ente.TwoFactorEnableRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ err := h.UserController.EnableTwoFactor(userID, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ c.Status(http.StatusOK)
+}
+
+// VerifyTwoFactor handles the two factor validation request
+func (h *UserHandler) VerifyTwoFactor(c *gin.Context) {
+ var request ente.TwoFactorVerificationRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Failed to bind request: %s", err)))
+ return
+ }
+ response, err := h.UserController.VerifyTwoFactor(c, request.SessionID, request.Code)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ c.JSON(http.StatusOK, response)
+}
+
+// BeginPasskeyAuthenticationCeremony handles the request to begin the passkey authentication ceremony
+func (h *UserHandler) BeginPasskeyAuthenticationCeremony(c *gin.Context) {
+ var request ente.PasskeyTwoFactorBeginAuthenticationCeremonyRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Failed to bind request: %s", err)))
+ return
+ }
+
+ userID, err := h.UserController.PasskeyRepo.GetUserIDWithPasskeyTwoFactorSession(request.SessionID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ user, err := h.UserController.UserRepo.Get(userID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ options, _, ceremonySessionID, err := h.UserController.PasskeyRepo.CreateBeginAuthenticationData(&user)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ c.JSON(http.StatusOK, gin.H{
+ "options": options,
+ "ceremonySessionID": ceremonySessionID,
+ })
+}
+
+func (h *UserHandler) FinishPasskeyAuthenticationCeremony(c *gin.Context) {
+ var request ente.PasskeyTwoFactorFinishAuthenticationCeremonyRequest
+ if err := c.ShouldBindQuery(&request); err != nil {
+ handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Failed to bind request: %s", err)))
+ return
+ }
+
+ userID, err := h.UserController.PasskeyRepo.GetUserIDWithPasskeyTwoFactorSession(request.SessionID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ user, err := h.UserController.UserRepo.Get(userID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ err = h.UserController.PasskeyRepo.FinishAuthentication(&user, c.Request, uuid.MustParse(request.CeremonySessionID))
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ response, err := h.UserController.GetKeyAttributeAndToken(c, userID)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+
+ c.JSON(http.StatusOK, response)
+}
+
+// DisableTwoFactor disables the two factor
authentication for a user +func (h *UserHandler) DisableTwoFactor(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + err := h.UserController.DisableTwoFactor(userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +// RecoverTwoFactor handles the two factor recovery request by sending the +// recoveryKeyEncryptedTwoFactorSecret for the user to decrypt it and make twoFactor removal api call +func (h *UserHandler) RecoverTwoFactor(c *gin.Context) { + sessionID := c.Query("sessionID") + response, err := h.UserController.RecoverTwoFactor(sessionID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, response) +} + +// RemoveTwoFactor handles two factor deactivation request if user lost his device +// by authenticating him using his twoFactorsessionToken and twoFactor secret +func (h *UserHandler) RemoveTwoFactor(c *gin.Context) { + var request ente.TwoFactorRemovalRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + response, err := h.UserController.RemoveTwoFactor(c, request.SessionID, request.Secret) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, response) +} + +func (h *UserHandler) ReportEvent(c *gin.Context) { + var request ente.EventReportRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.Status(http.StatusOK) +} + +func (h *UserHandler) GetPaymentToken(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + token, err := h.UserController.GetJWTToken(userID, jwt.PAYMENT) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "paymentToken": token, + }) +} + +func (h *UserHandler) GetFamiliesToken(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + token, err := h.UserController.GetJWTToken(userID, jwt.FAMILIES) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "familiesToken": token, + }) +} + +func (h *UserHandler) GetAccountsToken(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + token, err := h.UserController.GetJWTToken(userID, jwt.ACCOUNTS) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "accountsToken": token, + }) +} + +func (h *UserHandler) GetActiveSessions(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + sessions, err := h.UserController.GetActiveSessions(c, userID) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{ + "sessions": sessions, + }) +} + +// TerminateSession removes the auth token from (instance) cache & database. +func (h *UserHandler) TerminateSession(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + token := c.Query("token") + err := h.UserController.TerminateSession(userID, token) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{}) +} + +// GetDeleteChallenge responds with flag to indicate if account deletion is enabled. 
+// When enabled, it returns a challenge/encrypted token which clients need to decrypt +// and send-back while confirming deletion +func (h *UserHandler) GetDeleteChallenge(c *gin.Context) { + response, err := h.UserController.GetDeleteChallengeToken(c) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, response) +} + +// DeleteUser api for deleting a user +func (h *UserHandler) DeleteUser(c *gin.Context) { + var request ente.DeleteAccountRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, stacktrace.Propagate(err, "Could not bind request params")) + return + } + response, err := h.UserController.SelfDeleteAccount(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, response) +} + +// GetSRPAttributes returns the SRP attributes for a user +func (h *UserHandler) GetSRPAttributes(c *gin.Context) { + var request ente.GetSRPAttributesRequest + if err := c.ShouldBindQuery(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + response, err := h.UserController.GetSRPAttributes(c, request.Email) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, gin.H{"attributes": response}) +} + +// SetupSRP sets the SRP attributes for a user +func (h *UserHandler) SetupSRP(c *gin.Context) { + var request ente.SetupSRPRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + userID := auth.GetUserID(c.Request.Header) + resp, err := h.UserController.SetupSRP(c, userID, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, resp) +} + +// CompleteSRPSetup completes the SRP setup for a user +func (h *UserHandler) CompleteSRPSetup(c *gin.Context) { + var request ente.CompleteSRPSetupRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + resp, err := h.UserController.CompleteSRPSetup(c, request) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, resp) +} + +// UpdateSrpAndKeyAttributes updates the SRP setup for a user and key attributes +func (h *UserHandler) UpdateSrpAndKeyAttributes(c *gin.Context) { + var request ente.UpdateSRPAndKeysRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + userID := auth.GetUserID(c.Request.Header) + // default to true + clearTokens := true + if request.LogOutOtherDevices != nil { + clearTokens = *request.LogOutOtherDevices + } + resp, err := h.UserController.UpdateSrpAndKeyAttributes(c, userID, request, clearTokens) + if err != nil { + handler.Error(c, stacktrace.Propagate(err, "")) + return + } + c.JSON(http.StatusOK, resp) +} + +// CreateSRPSession set the SRP A value on the server and returns the SRP B value to the client +func (h *UserHandler) CreateSRPSession(c *gin.Context) { + var request ente.CreateSRPSessionRequest + if err := c.ShouldBindJSON(&request); err != nil { + handler.Error(c, + stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err))) + return + } + 
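+ // SRP step 1 of 2: the request carries the client's ephemeral value A and
+ // the response carries the server's ephemeral value B; the client then
+ // proves knowledge of the password in VerifySRPSession by sending M1.
+ //
+ // Illustrative client flow (a sketch, not part of this patch):
+ //	1. CreateSRPSession(A) -> B, sessionID
+ //	2. client derives the proof M1 from (A, B, password)
+ //	3. VerifySRPSession(sessionID, M1) -> session verification response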
resp, err := h.UserController.CreateSrpSession(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ c.JSON(http.StatusOK, resp)
+}
+
+// VerifySRPSession checks the M1 value to determine if the user actually knows the password
+func (h *UserHandler) VerifySRPSession(c *gin.Context) {
+ var request ente.VerifySRPSessionRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+ response, err := h.UserController.VerifySRPSession(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, ""))
+ return
+ }
+ c.JSON(http.StatusOK, response)
+}
diff --git a/server/pkg/api/userentity.go b/server/pkg/api/userentity.go
new file mode 100644
index 000000000..1d10b3464
--- /dev/null
+++ b/server/pkg/api/userentity.go
@@ -0,0 +1,117 @@
+package api
+
+import (
+ "fmt"
+ "net/http"
+
+ "github.com/ente-io/museum/ente"
+ model "github.com/ente-io/museum/ente/userentity"
+ userentity "github.com/ente-io/museum/pkg/controller/userentity"
+ "github.com/ente-io/museum/pkg/utils/handler"
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-gonic/gin"
+ "github.com/google/uuid"
+)
+
+// UserEntityHandler exposes request handlers for various operations on user entities
+type UserEntityHandler struct {
+ Controller *userentity.Controller
+}
+
+// CreateKey creates the encryption key for the user's entities
+func (h *UserEntityHandler) CreateKey(c *gin.Context) {
+ var request model.EntityKeyRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+ err := h.Controller.CreateKey(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to create key"))
+ return
+ }
+ c.Status(http.StatusOK)
+}
+
+// GetKey fetches the entity key for the user
+func (h *UserEntityHandler) GetKey(c *gin.Context) {
+ var request model.GetEntityKeyRequest
+ if err := c.ShouldBindQuery(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+ resp, err := h.Controller.GetKey(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to fetch key"))
+ return
+ }
+ c.JSON(http.StatusOK, resp)
+}
+
+// CreateEntity creates a new entity
+func (h *UserEntityHandler) CreateEntity(c *gin.Context) {
+ var request model.EntityDataRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+ resp, err := h.Controller.CreateEntity(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to create entity"))
+ return
+ }
+ c.JSON(http.StatusOK, resp)
+}
+
+// UpdateEntity updates an existing entity
+func (h *UserEntityHandler) UpdateEntity(c *gin.Context) {
+ var request model.UpdateEntityDataRequest
+ if err := c.ShouldBindJSON(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+ resp, err := h.Controller.UpdateEntity(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to update entity"))
+ return
+ }
+ c.JSON(http.StatusOK, resp)
+}
+
+// DeleteEntity deletes the entity with the given id
+func (h *UserEntityHandler) DeleteEntity(c *gin.Context) {
+ id, err := uuid.Parse(c.Query("id"))
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(ente.ErrBadRequest, "failed to parse id"))
+ return
+ }
+ _, err = h.Controller.Delete(c, id)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to delete entity"))
+ return
+ }
+ c.Status(http.StatusOK)
+}
+
+// GetDiff returns the changes to the user's entities since the given time
+func (h *UserEntityHandler) GetDiff(c *gin.Context) {
+ var request model.GetEntityDiffRequest
+ if err := c.ShouldBindQuery(&request); err != nil {
+ handler.Error(c,
+ stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("Request binding failed %s", err)))
+ return
+ }
+
+ entities, err := h.Controller.GetDiff(c, request)
+ if err != nil {
+ handler.Error(c, stacktrace.Propagate(err, "Failed to fetch entityData diff"))
+ return
+ }
+ c.JSON(http.StatusOK, gin.H{
+ "diff": entities,
+ })
+}
diff --git a/server/pkg/controller/access/access.go b/server/pkg/controller/access/access.go
new file mode 100644
index 000000000..b2acbff25
--- /dev/null
+++ b/server/pkg/controller/access/access.go
@@ -0,0 +1,34 @@
+package access
+
+import (
+ "github.com/ente-io/museum/pkg/repo"
+ "github.com/gin-gonic/gin"
+)
+
+// Controller exposes helper methods to perform access checks while fetching or editing
+// any entity.
+type Controller interface {
+ GetCollection(ctx *gin.Context, req *GetCollectionParams) (*GetCollectionResponse, error)
+ VerifyFileOwnership(ctx *gin.Context, req *VerifyFileOwnershipParams) error
+}
+
+// controllerImpl implements Controller
+type controllerImpl struct {
+ FileRepo *repo.FileRepository
+ CollectionRepo *repo.CollectionRepository
+}
+
+// https://stackoverflow.com/a/33089540/546896
+var _ Controller = (*controllerImpl)(nil) // Verify that *T implements I.
+var _ Controller = controllerImpl{}
+
+func NewAccessController(
+ collRepo *repo.CollectionRepository,
+ fileRepo *repo.FileRepository,
+) Controller {
+ comp := &controllerImpl{
+ CollectionRepo: collRepo,
+ FileRepo: fileRepo,
+ }
+ return comp
+}
diff --git a/server/pkg/controller/access/collection.go b/server/pkg/controller/access/collection.go
new file mode 100644
index 000000000..8f0630776
--- /dev/null
+++ b/server/pkg/controller/access/collection.go
@@ -0,0 +1,59 @@
+package access
+
+import (
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-gonic/gin"
+)
+
+type GetCollectionParams struct {
+ CollectionID int64
+ // userID of the user trying to fetch the collection
+ ActorUserID int64
+ // IncludeDeleted defaults to false. If false and the user is trying to fetch a deleted collection
+ // then the request fails
+ IncludeDeleted bool
+
+ // VerifyOwner defaults to false.
If the flag is set to true, the method will verify that the actor actually owns the collection
+ VerifyOwner bool
+ // todo: Add accessType in params for verifying read/write/can-upload/owner types of access
+}
+
+type GetCollectionResponse struct {
+ Collection ente.Collection
+ Role *ente.CollectionParticipantRole
+}
+
+func (c controllerImpl) GetCollection(ctx *gin.Context, req *GetCollectionParams) (*GetCollectionResponse, error) {
+ collection, err := c.CollectionRepo.Get(req.CollectionID)
+ role := ente.UNKNOWN
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+
+ // Perform permission related access check if user is not the owner of the collection
+ if req.VerifyOwner && req.ActorUserID != collection.Owner.ID {
+ return nil, stacktrace.Propagate(ente.ErrPermissionDenied, "actor doesn't own the collection")
+ }
+
+ if req.ActorUserID != collection.Owner.ID {
+ shareeRole, err := c.CollectionRepo.GetCollectionShareeRole(req.CollectionID, req.ActorUserID)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ // Hide public URL info for non-collection owners
+ collection.PublicURLs = nil
+ role = *shareeRole
+ } else {
+ role = ente.OWNER
+ }
+
+ if !req.IncludeDeleted && collection.IsDeleted {
+ return nil, stacktrace.Propagate(ente.ErrNotFound, "trying to access deleted collection")
+ }
+
+ return &GetCollectionResponse{
+ Collection: collection,
+ Role: &role,
+ }, nil
+}
diff --git a/server/pkg/controller/access/file.go b/server/pkg/controller/access/file.go
new file mode 100644
index 000000000..9c6cfdc16
--- /dev/null
+++ b/server/pkg/controller/access/file.go
@@ -0,0 +1,28 @@
+package access
+
+import (
+ "github.com/ente-io/museum/ente"
+ enteArray "github.com/ente-io/museum/pkg/utils/array"
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-contrib/requestid"
+ "github.com/gin-gonic/gin"
+ log "github.com/sirupsen/logrus"
+)
+
+type VerifyFileOwnershipParams struct {
+ // userID of the user claiming ownership of the files
+ ActorUserId int64
+ FileIDs []int64
+}
+
+// VerifyFileOwnership will return an error if the given fileIDs are not valid or don't belong to the owner
+func (c controllerImpl) VerifyFileOwnership(ctx *gin.Context, req *VerifyFileOwnershipParams) error {
+ if enteArray.ContainsDuplicateInInt64Array(req.FileIDs) {
+ return stacktrace.Propagate(ente.ErrBadRequest, "duplicate fileIDs")
+ }
+ ownerID := req.ActorUserId
+ logger := log.WithFields(log.Fields{
+ "req_id": requestid.Get(ctx),
+ })
+ return c.FileRepo.VerifyFileOwner(ctx, req.FileIDs, ownerID, logger)
+}
diff --git a/server/pkg/controller/appstore.go b/server/pkg/controller/appstore.go
new file mode 100644
index 000000000..428ae5319
--- /dev/null
+++ b/server/pkg/controller/appstore.go
@@ -0,0 +1,194 @@
+package controller
+
+import (
+ "context"
+ "fmt"
+ "github.com/ente-io/museum/pkg/controller/commonbilling"
+ "github.com/prometheus/common/log"
+ "strconv"
+ "strings"
+
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-contrib/requestid"
+ "github.com/gin-gonic/gin"
+ "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
+ "github.com/awa/go-iap/appstore"
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/museum/pkg/repo"
+ "github.com/ente-io/museum/pkg/utils/array"
+)
+
+// AppStoreController provides abstractions for handling billing on AppStore
+type AppStoreController struct {
+ AppStoreClient appstore.Client
+ BillingRepo *repo.BillingRepository
+ FileRepo *repo.FileRepository
+ UserRepo *repo.UserRepository
+ BillingPlansPerCountry
ente.BillingPlansPerCountry + CommonBillCtrl *commonbilling.Controller + // appStoreSharedPassword is the password to be used to access AppStore APIs + appStoreSharedPassword string +} + +// Return a new instance of AppStoreController +func NewAppStoreController( + plans ente.BillingPlansPerCountry, + billingRepo *repo.BillingRepository, + fileRepo *repo.FileRepository, + userRepo *repo.UserRepository, + commonBillCtrl *commonbilling.Controller, +) *AppStoreController { + appleSharedSecret := viper.GetString("apple.shared-secret") + return &AppStoreController{ + AppStoreClient: *appstore.New(), + BillingRepo: billingRepo, + FileRepo: fileRepo, + UserRepo: userRepo, + BillingPlansPerCountry: plans, + appStoreSharedPassword: appleSharedSecret, + CommonBillCtrl: commonBillCtrl, + } +} + +var SubsUpdateNotificationTypes = []string{string(appstore.NotificationTypeDidChangeRenewalStatus), string(appstore.NotificationTypeCancel), string(appstore.NotificationTypeDidRevoke)} + +// HandleNotification handles an AppStore notification +func (c *AppStoreController) HandleNotification(ctx *gin.Context, notification appstore.SubscriptionNotification) error { + logger := logrus.WithFields(logrus.Fields{ + "req_id": requestid.Get(ctx), + }) + purchase, err := c.verifyAppStoreSubscription(notification.UnifiedReceipt.LatestReceipt) + if err != nil { + return stacktrace.Propagate(err, "") + } + latestReceiptInfo := c.getLatestReceiptInfo(purchase.LatestReceiptInfo) + if latestReceiptInfo.TransactionID == latestReceiptInfo.OriginalTransactionID && !array.StringInList(string(notification.NotificationType), SubsUpdateNotificationTypes) { + var logMsg = fmt.Sprintf("Ignoring notification of type %s", notification.NotificationType) + if notification.NotificationType != appstore.NotificationTypeInitialBuy { + // log unexpected notification types + logger.Error(logMsg) + } else { + logger.Info(logMsg) + } + // First subscription, no user to link to + return nil + } + subscription, err := c.BillingRepo.GetSubscriptionForTransaction(latestReceiptInfo.OriginalTransactionID, ente.AppStore) + if err != nil { + return stacktrace.Propagate(err, "") + } + expiryTimeInMillis, _ := strconv.ParseInt(latestReceiptInfo.ExpiresDate.ExpiresDateMS, 10, 64) + if latestReceiptInfo.ProductID == subscription.ProductID && expiryTimeInMillis*1000 < subscription.ExpiryTime { + // Outdated notification, no-op + } else { + if latestReceiptInfo.ProductID != subscription.ProductID { + var newPlan ente.BillingPlan + plans := c.BillingPlansPerCountry["EU"] // Country code is irrelevant since Storage will be the same for a given subscriptionID + for _, plan := range plans { + if plan.IOSID == latestReceiptInfo.ProductID { + newPlan = plan + break + } + } + if newPlan.Storage < subscription.Storage { // Downgrade + canDowngrade, canDowngradeErr := c.CommonBillCtrl.CanDowngradeToGivenStorage(newPlan.Storage, subscription.UserID) + if canDowngradeErr != nil { + return stacktrace.Propagate(canDowngradeErr, "") + } + if !canDowngrade { + return stacktrace.Propagate(ente.ErrCannotDowngrade, "") + } + log.Info("Usage is good") + } + newSubscription := ente.Subscription{ + Storage: newPlan.Storage, + ExpiryTime: expiryTimeInMillis * 1000, + ProductID: latestReceiptInfo.ProductID, + PaymentProvider: ente.AppStore, + OriginalTransactionID: latestReceiptInfo.OriginalTransactionID, + Attributes: ente.SubscriptionAttributes{LatestVerificationData: notification.UnifiedReceipt.LatestReceipt}, + } + err = c.BillingRepo.ReplaceSubscription( + subscription.ID, + 
newSubscription, + ) + if err != nil { + return stacktrace.Propagate(err, "") + } + } else { + if notification.NotificationType == appstore.NotificationTypeDidChangeRenewalStatus { + err := c.BillingRepo.UpdateSubscriptionCancellationStatus(subscription.UserID, notification.AutoRenewStatus == "false") + if err != nil { + return stacktrace.Propagate(err, "") + } + } else if notification.NotificationType == appstore.NotificationTypeCancel || notification.NotificationType == appstore.NotificationTypeDidRevoke { + err := c.BillingRepo.UpdateSubscriptionCancellationStatus(subscription.UserID, true) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + err = c.BillingRepo.UpdateSubscriptionExpiryTime(subscription.ID, expiryTimeInMillis*1000) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + } + err = c.BillingRepo.LogAppStorePush(subscription.UserID, notification, *purchase) + return stacktrace.Propagate(err, "") +} + +// GetVerifiedSubscription verifies and returns the verified subscription +func (c *AppStoreController) GetVerifiedSubscription(userID int64, productID string, verificationData string) (ente.Subscription, error) { + var s ente.Subscription + s.UserID = userID + s.ProductID = productID + s.PaymentProvider = ente.AppStore + s.Attributes.LatestVerificationData = verificationData + plans := c.BillingPlansPerCountry["EU"] // Country code is irrelevant since Storage will be the same for a given subscriptionID + + response, err := c.verifyAppStoreSubscription(verificationData) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + for _, plan := range plans { + if plan.IOSID == productID { + s.Storage = plan.Storage + break + } + } + latestReceiptInfo := c.getLatestReceiptInfo(response.LatestReceiptInfo) + s.OriginalTransactionID = latestReceiptInfo.OriginalTransactionID + expiryTime, _ := strconv.ParseInt(latestReceiptInfo.ExpiresDate.ExpiresDateMS, 10, 64) + s.ExpiryTime = expiryTime * 1000 + return s, nil +} + +// VerifyAppStoreSubscription verifies an AppStore subscription +func (c *AppStoreController) verifyAppStoreSubscription(verificationData string) (*appstore.IAPResponse, error) { + iapRequest := appstore.IAPRequest{ + ReceiptData: verificationData, + Password: c.appStoreSharedPassword, + } + response := &appstore.IAPResponse{} + context := context.Background() + err := c.AppStoreClient.Verify(context, iapRequest, response) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if response.Status != 0 { + return nil, ente.ErrBadRequest + } + return response, nil +} + +func (c *AppStoreController) getLatestReceiptInfo(receiptInfo []appstore.InApp) appstore.InApp { + latestReceiptInfo := receiptInfo[0] + for _, receiptInfo := range receiptInfo { + if strings.Compare(latestReceiptInfo.ExpiresDate.ExpiresDateMS, receiptInfo.ExpiresDate.ExpiresDateMS) < 0 { + latestReceiptInfo = receiptInfo + } + } + return latestReceiptInfo +} diff --git a/server/pkg/controller/authenticator/controller.go b/server/pkg/controller/authenticator/controller.go new file mode 100644 index 000000000..a89b5047f --- /dev/null +++ b/server/pkg/controller/authenticator/controller.go @@ -0,0 +1,64 @@ +package authenticaor + +import ( + model "github.com/ente-io/museum/ente/authenticator" + "github.com/ente-io/museum/pkg/repo/authenticator" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/stacktrace" + "github.com/google/uuid" + + "github.com/gin-gonic/gin" +) + +// Controller is interface for exposing business logic 
related to the authenticator app
+type Controller struct {
+ Repo *authenticator.Repository
+}
+
+// CreateKey stores the authenticator key for the user
+func (c *Controller) CreateKey(ctx *gin.Context, req model.CreateKeyRequest) error {
+ userID := auth.GetUserID(ctx.Request.Header)
+ return c.Repo.CreateKey(ctx, userID, req)
+}
+
+// GetKey returns the authenticator key for the user
+func (c *Controller) GetKey(ctx *gin.Context) (*model.Key, error) {
+ userID := auth.GetUserID(ctx.Request.Header)
+ res, err := c.Repo.GetKey(ctx, userID)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ return &res, nil
+}
+
+// CreateEntity creates a new authenticator entity
+func (c *Controller) CreateEntity(ctx *gin.Context, req model.CreateEntityRequest) (*model.Entity, error) {
+ userID := auth.GetUserID(ctx.Request.Header)
+ id, err := c.Repo.Create(ctx, userID, req)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "failed to create entity")
+ }
+ entity, err := c.Repo.Get(ctx, userID, id)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "failed to fetch the created entity")
+ }
+ return &entity, nil
+}
+
+// UpdateEntity updates an existing authenticator entity
+func (c *Controller) UpdateEntity(ctx *gin.Context, req model.UpdateEntityRequest) error {
+ userID := auth.GetUserID(ctx.Request.Header)
+ return c.Repo.Update(ctx, userID, req)
+}
+
+// Delete removes the authenticator entity with the given ID
+func (c *Controller) Delete(ctx *gin.Context, entityID uuid.UUID) (bool, error) {
+ userID := auth.GetUserID(ctx.Request.Header)
+ return c.Repo.Delete(ctx, userID, entityID)
+}
+
+// GetDiff returns the authenticator entities that have changed since the given time
+func (c *Controller) GetDiff(ctx *gin.Context, req model.GetEntityDiffRequest) ([]model.Entity, error) {
+ userID := auth.GetUserID(ctx.Request.Header)
+ return c.Repo.GetDiff(ctx, userID, *req.SinceTime, req.Limit)
+}
diff --git a/server/pkg/controller/billing.go b/server/pkg/controller/billing.go
new file mode 100644
index 000000000..228a3344b
--- /dev/null
+++ b/server/pkg/controller/billing.go
@@ -0,0 +1,497 @@
+package controller
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "fmt"
+ "github.com/ente-io/museum/pkg/controller/commonbilling"
+ "strconv"
+
+ "github.com/ente-io/museum/pkg/repo/storagebonus"
+
+ "github.com/ente-io/museum/pkg/controller/discord"
+ "github.com/ente-io/museum/pkg/controller/email"
+ "github.com/ente-io/museum/pkg/utils/array"
+ "github.com/ente-io/museum/pkg/utils/billing"
+ "github.com/ente-io/museum/pkg/utils/network"
+ "github.com/ente-io/museum/pkg/utils/time"
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-gonic/gin"
+ log "github.com/sirupsen/logrus"
+ "github.com/spf13/viper"
+
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/museum/pkg/repo"
+)
+
+// BillingController provides abstractions for handling billing related queries
+type BillingController struct {
+ BillingPlansPerAccount ente.BillingPlansPerAccount
+ BillingRepo *repo.BillingRepository
+ UserRepo *repo.UserRepository
+ UsageRepo *repo.UsageRepository
+ StorageBonusRepo *storagebonus.Repository
+ AppStoreController *AppStoreController
+ PlayStoreController *PlayStoreController
+ StripeController *StripeController
+ DiscordController *discord.DiscordController
+ EmailNotificationCtrl *email.EmailNotificationController
+ CommonBillCtrl *commonbilling.Controller
+}
+
+// NewBillingController returns a new instance of BillingController
+func NewBillingController(
+ plans ente.BillingPlansPerAccount,
+ appStoreController *AppStoreController,
+ playStoreController *PlayStoreController,
+ stripeController *StripeController,
+ discordController *discord.DiscordController,
+ emailNotificationCtrl *email.EmailNotificationController,
+ billingRepo *repo.BillingRepository,
+ userRepo
*repo.UserRepository,
+ usageRepo *repo.UsageRepository,
+ storageBonusRepo *storagebonus.Repository,
+ commonBillCtrl *commonbilling.Controller,
+) *BillingController {
+ return &BillingController{
+ BillingPlansPerAccount: plans,
+ BillingRepo: billingRepo,
+ UserRepo: userRepo,
+ UsageRepo: usageRepo,
+ AppStoreController: appStoreController,
+ PlayStoreController: playStoreController,
+ StripeController: stripeController,
+ DiscordController: discordController,
+ EmailNotificationCtrl: emailNotificationCtrl,
+ StorageBonusRepo: storageBonusRepo,
+ CommonBillCtrl: commonBillCtrl,
+ }
+}
+
+// GetPlansV2 returns the available subscription plans for the given country and stripe account
+func (c *BillingController) GetPlansV2(countryCode string, stripeAccountCountry ente.StripeAccountCountry) []ente.BillingPlan {
+ plans := c.getAllPlans(countryCode, stripeAccountCountry)
+ result := make([]ente.BillingPlan, 0)
+ ids := billing.GetActivePlanIDs()
+ for _, plan := range plans {
+ if contains(ids, plan.ID) {
+ result = append(result, plan)
+ }
+ }
+ return result
+}
+
+// GetStripeAccountCountry returns the stripe account country of the user's existing plan;
+// if the user doesn't have a stripe subscription then ente.DefaultStripeAccountCountry is returned
+func (c *BillingController) GetStripeAccountCountry(userID int64) (ente.StripeAccountCountry, error) {
+ stripeSubInfo, hasStripeSub, err := c.GetUserStripeSubscriptionInfo(userID)
+ if err != nil {
+ return "", stacktrace.Propagate(err, "")
+ }
+ if hasStripeSub {
+ return stripeSubInfo.AccountCountry, nil
+ } else {
+ // if the user doesn't have a stripe subscription, return the default stripe account country
+ return ente.DefaultStripeAccountCountry, nil
+ }
+}
+
+// GetUserPlans returns the active plans for a user
+func (c *BillingController) GetUserPlans(ctx *gin.Context, userID int64) ([]ente.BillingPlan, error) {
+ stripeSubInfo, hasStripeSub, err := c.GetUserStripeSubscriptionInfo(userID)
+ if err != nil {
+ return []ente.BillingPlan{}, stacktrace.Propagate(err, "Failed to get user's subscription country and stripe account")
+ }
+ if hasStripeSub {
+ return c.GetPlansV2(stripeSubInfo.PlanCountry, stripeSubInfo.AccountCountry), nil
+ } else {
+ // the user doesn't have a stripe subscription, so return the default account plans for the country the user is from
+ return c.GetPlansV2(network.GetClientCountry(ctx), ente.DefaultStripeAccountCountry), nil
+ }
+}
+
+// GetSubscription returns the current subscription for a user if any
+func (c *BillingController) GetSubscription(ctx *gin.Context, userID int64) (ente.Subscription, error) {
+ s, err := c.BillingRepo.GetUserSubscription(userID)
+ if err != nil {
+ return ente.Subscription{}, stacktrace.Propagate(err, "")
+ }
+ plan, err := c.getPlanForCountry(s, network.GetClientCountry(ctx))
+ if err != nil {
+ return ente.Subscription{}, stacktrace.Propagate(err, "")
+ }
+ s.Price = plan.Price
+ s.Period = plan.Period
+ return s, nil
+}
+
+func (c *BillingController) GetRedirectURL(ctx *gin.Context) (string, error) {
+ whitelistedRedirectURLs := viper.GetStringSlice("stripe.whitelisted-redirect-urls")
+ redirectURL := ctx.Query("redirectURL")
+ if len(redirectURL) > 0 && redirectURL[len(redirectURL)-1:] == "/" { // Ignore the trailing slash
+ redirectURL = redirectURL[:len(redirectURL)-1]
+ }
+ for _, ar := range whitelistedRedirectURLs {
+ if ar == redirectURL {
+ return ar, nil
+ }
+ }
+ return "", stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("not a whitelistedRedirectURL- %s", redirectURL))
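+ // Anything outside stripe.whitelisted-redirect-urls is rejected outright,
+ // so clients cannot be redirected to an arbitrary URL after checkout.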
+}
+
+// GetActiveSubscription returns the user's active subscription, or an error if there is no active subscription
+func (c *BillingController) GetActiveSubscription(userID int64) (ente.Subscription, error) {
+ subscription, err := c.BillingRepo.GetUserSubscription(userID)
+ if errors.Is(err, sql.ErrNoRows) {
+ return subscription, ente.ErrNoActiveSubscription
+ }
+ if err != nil {
+ return subscription, stacktrace.Propagate(err, "")
+ }
+ expiryBuffer := int64(0)
+ if value, ok := billing.ProviderToExpiryGracePeriodMap[subscription.PaymentProvider]; ok {
+ expiryBuffer = value
+ }
+ if (subscription.ExpiryTime + expiryBuffer) < time.Microseconds() {
+ return subscription, ente.ErrNoActiveSubscription
+ }
+ return subscription, nil
+}
+
+// IsActivePayingSubscriber validates that the current user is a paying customer with an active subscription
+func (c *BillingController) IsActivePayingSubscriber(userID int64) error {
+ subscription, err := c.GetActiveSubscription(userID)
+ var subErr error
+ if err != nil {
+ subErr = stacktrace.Propagate(err, "")
+ } else if !billing.IsActivePaidPlan(subscription) {
+ subErr = ente.ErrSharingDisabledForFreeAccounts
+ }
+ if subErr != nil && (errors.Is(subErr, ente.ErrNoActiveSubscription) || errors.Is(subErr, ente.ErrSharingDisabledForFreeAccounts)) {
+ storage, storeErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), userID)
+ if storeErr != nil {
+ return storeErr
+ }
+ if *storage > 0 {
+ return nil
+ }
+ }
+ // surface the subscription error when no paid add-on storage covers the user
+ return subErr
+}
+
+// HasActiveSelfOrFamilySubscription validates that the user or the user's family admin has an active subscription
+func (c *BillingController) HasActiveSelfOrFamilySubscription(userID int64) error {
+ var subscriptionUserID int64
+ familyAdminID, err := c.UserRepo.GetFamilyAdminID(userID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ if familyAdminID != nil {
+ subscriptionUserID = *familyAdminID
+ } else {
+ subscriptionUserID = userID
+ }
+ _, err = c.GetActiveSubscription(subscriptionUserID)
+ if err != nil {
+ if errors.Is(err, ente.ErrNoActiveSubscription) {
+ storage, storeErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), subscriptionUserID)
+ if storeErr != nil {
+ return storeErr
+ }
+ if *storage > 0 {
+ return nil
+ }
+ }
+ return stacktrace.Propagate(err, "")
+ }
+ return nil
+}
+
+func (c *BillingController) GetUserStripeSubscriptionInfo(userID int64) (ente.StripeSubscriptionInfo, bool, error) {
+ s, err := c.BillingRepo.GetUserSubscription(userID)
+ if err != nil {
+ return ente.StripeSubscriptionInfo{}, false, stacktrace.Propagate(err, "")
+ }
+ // skipping country code extraction for non-stripe subscriptions
+ // as they have the same product id across countries and hence can't be distinguished
+ if s.PaymentProvider != ente.Stripe {
+ return ente.StripeSubscriptionInfo{}, false, nil
+ }
+ _, countryCode, err := c.getPlanWithCountry(s)
+ if err != nil {
+ return ente.StripeSubscriptionInfo{}, false, stacktrace.Propagate(err, "")
+ }
+ return ente.StripeSubscriptionInfo{PlanCountry: countryCode, AccountCountry: s.Attributes.StripeAccountCountry}, true, nil
+}
+
+// VerifySubscription verifies and returns the verified subscription
+func (c *BillingController) VerifySubscription(
+ userID int64,
+ paymentProvider ente.PaymentProvider,
+ productID string,
+ verificationData string) (ente.Subscription, error) {
+ if productID == ente.FreePlanProductID {
+ return c.BillingRepo.GetUserSubscription(userID)
+ }
+ var newSubscription ente.Subscription
+ var err error
+ switch
paymentProvider { + case ente.PlayStore: + newSubscription, err = c.PlayStoreController.GetVerifiedSubscription(userID, productID, verificationData) + case ente.AppStore: + newSubscription, err = c.AppStoreController.GetVerifiedSubscription(userID, productID, verificationData) + case ente.Stripe: + newSubscription, err = c.StripeController.GetVerifiedSubscription(userID, verificationData) + default: + err = stacktrace.Propagate(ente.ErrBadRequest, "") + } + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + currentSubscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + newSubscriptionExpiresSooner := newSubscription.ExpiryTime < currentSubscription.ExpiryTime + isUpgradingFromFreePlan := currentSubscription.ProductID == ente.FreePlanProductID + hasChangedProductID := currentSubscription.ProductID != newSubscription.ProductID + isOutdatedPurchase := !isUpgradingFromFreePlan && !hasChangedProductID && newSubscriptionExpiresSooner + if isOutdatedPurchase { + // User is reporting an outdated purchase that was already verified + // no-op + log.Info("Outdated purchase reported") + return currentSubscription, nil + } + if newSubscription.Storage < currentSubscription.Storage { + canDowngrade, canDowngradeErr := c.CommonBillCtrl.CanDowngradeToGivenStorage(newSubscription.Storage, userID) + if canDowngradeErr != nil { + return ente.Subscription{}, stacktrace.Propagate(canDowngradeErr, "") + } + if !canDowngrade { + return ente.Subscription{}, stacktrace.Propagate(ente.ErrCannotDowngrade, "") + } + log.Info("Usage is good") + } + if newSubscription.OriginalTransactionID != "" && newSubscription.OriginalTransactionID != "none" { + existingSub, existingSubErr := c.BillingRepo.GetSubscriptionForTransaction(newSubscription.OriginalTransactionID, paymentProvider) + if existingSubErr != nil { + if errors.Is(existingSubErr, sql.ErrNoRows) { + log.Info("No subscription created yet") + } else { + log.Info("Something went wrong") + log.WithError(existingSubErr).Error("GetSubscriptionForTransaction failed") + return ente.Subscription{}, stacktrace.Propagate(existingSubErr, "") + } + } else { + if existingSub.UserID != userID { + log.WithFields(log.Fields{ + "original_transaction_id": existingSub.OriginalTransactionID, + "existing_user": existingSub.UserID, + "current_user": userID, + }).Error("Subscription for given transactionID is attached with different user") + log.Info("Subscription attached to different user") + return ente.Subscription{}, stacktrace.Propagate(&ente.ErrSubscriptionAlreadyClaimed, + fmt.Sprintf("Subscription with txn id %s already associated with user %d", newSubscription.OriginalTransactionID, existingSub.UserID)) + } + } + } + err = c.BillingRepo.ReplaceSubscription( + currentSubscription.ID, + newSubscription, + ) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + log.Info("Replaced subscription") + newSubscription.ID = currentSubscription.ID + if paymentProvider == ente.PlayStore && + newSubscription.OriginalTransactionID != currentSubscription.OriginalTransactionID { + // Acknowledge to PlayStore in case of upgrades/downgrades/renewals + err = c.PlayStoreController.AcknowledgeSubscription(newSubscription.ProductID, verificationData) + if err != nil { + log.Error("Error acknowledging subscription ", err) + } + } + if isUpgradingFromFreePlan { + go func() { + amount := "unknown" + plan, _, err := c.getPlanWithCountry(newSubscription) 
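+ // Best effort: a failed price lookup falls back to "unknown", and the
+ // notification runs in a goroutine so it never blocks the purchase flow.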
+ if err != nil { + log.Error(err) + } else { + amount = plan.Price + } + c.DiscordController.NotifyNewSub(userID, string(paymentProvider), amount) + }() + go func() { + c.EmailNotificationCtrl.OnAccountUpgrade(userID) + }() + } + log.Info("Returning new subscription with ID " + strconv.FormatInt(newSubscription.ID, 10)) + return newSubscription, nil +} + +func (c *BillingController) getAllPlans(countryCode string, stripeAccountCountry ente.StripeAccountCountry) []ente.BillingPlan { + if array.StringInList(countryCode, billing.CountriesInEU) { + countryCode = "EU" + } + countryWisePlans := c.BillingPlansPerAccount[stripeAccountCountry] + if plans, found := countryWisePlans[countryCode]; found { + return plans + } + // unable to find plans for given country code, return plans for default country + defaultCountry := billing.GetDefaultPlanCountry() + return countryWisePlans[defaultCountry] +} + +func (c *BillingController) UpdateBillingEmail(userID int64, newEmail string) error { + subscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + hasStripeSubscription := subscription.PaymentProvider == ente.Stripe + if hasStripeSubscription { + err = c.StripeController.UpdateBillingEmail(subscription, newEmail) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + return nil +} + +func (c *BillingController) UpdateSubscription(r ente.UpdateSubscriptionRequest) error { + subscription, err := c.BillingRepo.GetUserSubscription(r.UserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + newSubscription := ente.Subscription{ + Storage: r.Storage, + ExpiryTime: r.ExpiryTime, + ProductID: r.ProductID, + PaymentProvider: r.PaymentProvider, + OriginalTransactionID: r.TransactionID, + Attributes: r.Attributes, + } + err = c.BillingRepo.ReplaceSubscription(subscription.ID, newSubscription) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = c.BillingRepo.LogAdminTriggeredSubscriptionUpdate(r) + return stacktrace.Propagate(err, "") +} + +func (c *BillingController) HandleAccountDeletion(ctx context.Context, userID int64, logger *log.Entry) (isCancelled bool, err error) { + logger.Info("updating billing on account deletion") + subscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + billingLogger := logger.WithFields(log.Fields{ + "customer_id": subscription.Attributes.CustomerID, + "is_cancelled": subscription.Attributes.IsCancelled, + "original_txn_id": subscription.OriginalTransactionID, + "payment_provider": subscription.PaymentProvider, + "product_id": subscription.ProductID, + "stripe_account_country": subscription.Attributes.StripeAccountCountry, + }) + billingLogger.Info("subscription fetched") + // user on free plan, no action required + if subscription.ProductID == ente.FreePlanProductID { + billingLogger.Info("user on free plan") + return true, nil + } + // The word "family" here is a misnomer - these are some manually created + // accounts for very early adopters, and are unrelated to Family Plans. + // Cancelation of these accounts will require manual intervention. Ideally, + // we should never be deleting such accounts. 
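+	// (An empty product ID is equally unexpected and is handled the same way below.)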
+	if subscription.ProductID == ente.FamilyPlanProductID || subscription.ProductID == "" {
+		return false, stacktrace.NewError(fmt.Sprintf("unexpected product id %s", subscription.ProductID), "")
+	}
+	isCancelled = subscription.Attributes.IsCancelled
+	// delete customer data from Stripe if the user is on a paid plan.
+	if subscription.PaymentProvider == ente.Stripe {
+		err = c.StripeController.CancelSubAndDeleteCustomer(subscription, billingLogger)
+		if err != nil {
+			return false, stacktrace.Propagate(err, "")
+		}
+		// on customer deletion, the subscription is automatically cancelled
+		isCancelled = true
+	} else if subscription.PaymentProvider == ente.AppStore || subscription.PaymentProvider == ente.PlayStore {
+		logger.Info("Updating originalTransactionID for app/playStore provider")
+		err := c.BillingRepo.UpdateTransactionIDOnDeletion(userID)
+		if err != nil {
+			return false, stacktrace.Propagate(err, "")
+		}
+	}
+	return isCancelled, nil
+}
+
+func (c *BillingController) getPlanWithCountry(s ente.Subscription) (ente.BillingPlan, string, error) {
+	var allPlans ente.BillingPlansPerCountry
+	if s.PaymentProvider == ente.Stripe {
+		allPlans = c.BillingPlansPerAccount[s.Attributes.StripeAccountCountry]
+	} else {
+		allPlans = c.BillingPlansPerAccount[ente.DefaultStripeAccountCountry]
+	}
+	subProductID := s.ProductID
+	for country, plans := range allPlans {
+		for _, plan := range plans {
+			if s.PaymentProvider == ente.Stripe && subProductID == plan.StripeID {
+				return plan, country, nil
+			} else if s.PaymentProvider == ente.PlayStore && subProductID == plan.AndroidID {
+				return plan, country, nil
+			} else if s.PaymentProvider == ente.AppStore && subProductID == plan.IOSID {
+				return plan, country, nil
+			} else if (s.PaymentProvider == ente.BitPay || s.PaymentProvider == ente.Paypal) && subProductID == plan.ID {
+				return plan, country, nil
+			}
+		}
+	}
+	if s.ProductID == ente.FreePlanProductID || s.ProductID == ente.FamilyPlanProductID {
+		return ente.BillingPlan{Period: ente.PeriodYear}, "", nil
+	}
+
+	return ente.BillingPlan{}, "", stacktrace.Propagate(ente.ErrNotFound, "unable to get plan for subscription")
+}
+
+func (c *BillingController) getPlanForCountry(s ente.Subscription, countryCode string) (ente.BillingPlan, error) {
+	var allPlans []ente.BillingPlan
+	if s.PaymentProvider == ente.Stripe {
+		allPlans = c.getAllPlans(countryCode, s.Attributes.StripeAccountCountry)
+	} else {
+		allPlans = c.getAllPlans(countryCode, ente.DefaultStripeAccountCountry)
+	}
+	subProductID := s.ProductID
+	for _, plan := range allPlans {
+		if s.PaymentProvider == ente.Stripe && subProductID == plan.StripeID {
+			return plan, nil
+		} else if s.PaymentProvider == ente.PlayStore && subProductID == plan.AndroidID {
+			return plan, nil
+		} else if s.PaymentProvider == ente.AppStore && subProductID == plan.IOSID {
+			return plan, nil
+		} else if (s.PaymentProvider == ente.BitPay || s.PaymentProvider == ente.Paypal) && subProductID == plan.ID {
+			return plan, nil
+		}
+	}
+	if s.ProductID == ente.FreePlanProductID || s.ProductID == ente.FamilyPlanProductID {
+		return ente.BillingPlan{Period: ente.PeriodYear}, nil
+	}
+
+	// If the request has a different `countryCode` because the user is traveling and we're unable to find a plan for
+	// that country, fall back to the previous logic for finding a plan.
+ plan, _, err := c.getPlanWithCountry(s) + if err != nil { + return ente.BillingPlan{}, stacktrace.Propagate(err, "") + } + return plan, nil +} + +func contains(planIDs []string, planID string) bool { + for _, id := range planIDs { + if id == planID { + return true + } + } + return false +} diff --git a/server/pkg/controller/cast/controller.go b/server/pkg/controller/cast/controller.go new file mode 100644 index 000000000..3b76420cc --- /dev/null +++ b/server/pkg/controller/cast/controller.go @@ -0,0 +1,61 @@ +package cast + +import ( + "context" + "github.com/ente-io/museum/ente/cast" + "github.com/ente-io/museum/pkg/controller/access" + castRepo "github.com/ente-io/museum/pkg/repo/cast" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +type Controller struct { + CastRepo *castRepo.Repository + AccessCtrl access.Controller +} + +func NewController(castRepo *castRepo.Repository, + accessCtrl access.Controller, +) *Controller { + return &Controller{ + CastRepo: castRepo, + AccessCtrl: accessCtrl, + } +} + +func (c *Controller) RegisterDevice(ctx context.Context, request *cast.RegisterDeviceRequest) (string, error) { + return c.CastRepo.AddCode(ctx, request.DeviceCode, request.PublicKey) +} + +func (c *Controller) GetPublicKey(ctx context.Context, deviceCode string) (string, error) { + return c.CastRepo.GetPubKey(ctx, deviceCode) +} + +func (c *Controller) GetEncCastData(ctx context.Context, deviceCode string) (*string, error) { + return c.CastRepo.GetEncCastData(ctx, deviceCode) +} + +func (c *Controller) InsertCastData(ctx *gin.Context, request *cast.CastRequest) error { + userID := auth.GetUserID(ctx.Request.Header) + return c.CastRepo.InsertCastData(ctx, userID, request.DeviceCode, request.CollectionID, request.CastToken, request.EncPayload) +} + +func (c *Controller) RevokeAllToken(ctx *gin.Context) error { + userID := auth.GetUserID(ctx.Request.Header) + return c.CastRepo.RevokeTokenForUser(ctx, userID) +} + +func (c *Controller) GetCollectionAndCasterIDForToken(ctx *gin.Context, token string) (*cast.AuthContext, error) { + collectId, userId, err := c.CastRepo.GetCollectionAndCasterIDForToken(ctx, token) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + _, err = c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{CollectionID: collectId, ActorUserID: userId}) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to verify cast access") + } + go c.CastRepo.UpdateLastUsedAtForToken(ctx, token) + return &cast.AuthContext{UserID: userId, CollectionID: collectId}, nil + +} diff --git a/server/pkg/controller/collection.go b/server/pkg/controller/collection.go new file mode 100644 index 000000000..021cbaa89 --- /dev/null +++ b/server/pkg/controller/collection.go @@ -0,0 +1,781 @@ +package controller + +import ( + "context" + "encoding/json" + "fmt" + "github.com/ente-io/museum/pkg/repo/cast" + "runtime/debug" + "strings" + t "time" + + "github.com/ente-io/museum/pkg/controller/access" + "github.com/gin-contrib/requestid" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/prometheus/client_golang/prometheus" + + "github.com/ente-io/museum/pkg/utils/array" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/gin-gonic/gin" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +const ( + CollectionDiffLimit = 2500 
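+	// Note: getDiff may return more than CollectionDiffLimit entries for a single page, since it never splits
+	// a set of files that share an updation time (see getDiff below).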
+)
+
+// CollectionController encapsulates logic that deals with collections
+type CollectionController struct {
+	PublicCollectionCtrl *PublicCollectionController
+	AccessCtrl           access.Controller
+	BillingCtrl          *BillingController
+	CollectionRepo       *repo.CollectionRepository
+	UserRepo             *repo.UserRepository
+	FileRepo             *repo.FileRepository
+	QueueRepo            *repo.QueueRepository
+	CastRepo             *cast.Repository
+	TaskRepo             *repo.TaskLockRepository
+	LatencyLogger        *prometheus.HistogramVec
+}
+
+// Create creates a collection
+func (c *CollectionController) Create(collection ente.Collection, ownerID int64) (ente.Collection, error) {
+	// The key attribute check is to ensure that the user does not end up uploading any files before actually setting the key attributes.
+	if _, keyErr := c.UserRepo.GetKeyAttributes(ownerID); keyErr != nil {
+		return ente.Collection{}, stacktrace.Propagate(keyErr, "Unable to get keyAttributes")
+	}
+	collectionType := collection.Type
+	collection.Owner.ID = ownerID
+	collection.UpdationTime = time.Microseconds()
+	// [20th Dec 2022] Patch on server side until the majority of existing mobile clients upgrade to a version > 0.7.0
+	// https://github.com/ente-io/photos-app/pull/725
+	if collection.Type == "CollectionType.album" {
+		collection.Type = "album"
+	}
+	if !array.StringInList(collection.Type, ente.ValidCollectionTypes) {
+		return ente.Collection{}, stacktrace.Propagate(fmt.Errorf("unexpected collection type %s", collection.Type), "")
+	}
+	collection, err := c.CollectionRepo.Create(collection)
+	if err != nil {
+		if err == ente.ErrUncategorizeCollectionAlreadyExists || err == ente.ErrFavoriteCollectionAlreadyExist {
+			dbCollection, err := c.CollectionRepo.GetCollectionByType(ownerID, collectionType)
+			if err != nil {
+				return ente.Collection{}, stacktrace.Propagate(err, "")
+			}
+			if dbCollection.IsDeleted {
+				return ente.Collection{}, stacktrace.Propagate(fmt.Errorf("special collection of type : %s is deleted", collectionType), "")
+			}
+			return dbCollection, nil
+		}
+		return ente.Collection{}, stacktrace.Propagate(err, "")
+	}
+	return collection, nil
+}
+
+// GetOwned returns the list of collections owned by a user
+func (c *CollectionController) GetOwned(userID int64, sinceTime int64, app ente.App) ([]ente.Collection, error) {
+	collections, err := c.CollectionRepo.GetCollectionsOwnedByUser(userID, sinceTime, app)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	go func() {
+		defer func() {
+			if r := recover(); r != nil {
+				log.Errorf("Panic caught: %s, stack: %s", r, string(debug.Stack()))
+			}
+		}()
+		collectionsV2, errV2 := c.CollectionRepo.GetCollectionsOwnedByUserV2(userID, sinceTime, app)
+		if errV2 != nil {
+			log.WithError(errV2).Error("failed to fetch collections using v2")
+		}
+		isEqual := cmp.Equal(collections, collectionsV2, cmpopts.SortSlices(func(a, b ente.Collection) bool { return a.ID < b.ID }))
+		if !isEqual {
+			jsonV1, _ := json.Marshal(collections)
+			jsonV2, _ := json.Marshal(collectionsV2)
+			log.WithFields(log.Fields{
+				"v1": string(jsonV1),
+				"v2": string(jsonV2),
+			}).Error("collections diff didn't match")
+		} else {
+			log.Info("collections diff matched")
+		}
+	}()
+	return collections, nil
+}
+
+// GetOwnedV2 returns the list of collections owned by a user using an optimized query
+func (c *CollectionController) GetOwnedV2(userID int64, sinceTime int64, app ente.App) ([]ente.Collection, error) {
+	collections, err := c.CollectionRepo.GetCollectionsOwnedByUserV2(userID, sinceTime, app)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return collections, nil
+}
+
+// GetCollection returns the collection for the given collectionID
+func (c *CollectionController) GetCollection(ctx *gin.Context, userID int64, cID int64) (ente.Collection, error) {
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   cID,
+		ActorUserID:    userID,
+		IncludeDeleted: true,
+	})
+	if err != nil {
+		return ente.Collection{}, stacktrace.Propagate(err, "")
+	}
+	return resp.Collection, nil
+}
+
+// GetSharedWith returns the list of collections that are shared with a user
+func (c *CollectionController) GetSharedWith(userID int64, sinceTime int64, app ente.App) ([]ente.Collection, error) {
+	collections, err := c.CollectionRepo.GetCollectionsSharedWithUser(userID, sinceTime, app)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return collections, nil
+}
+
+// Share shares a collection with a user
+func (c *CollectionController) Share(ctx *gin.Context, req ente.AlterShareRequest) ([]ente.CollectionUser, error) {
+	fromUserID := auth.GetUserID(ctx.Request.Header)
+	cID := req.CollectionID
+	encryptedKey := req.EncryptedKey
+	toUserEmail := strings.ToLower(strings.TrimSpace(req.Email))
+	// default role type
+	role := ente.VIEWER
+	if req.Role != nil {
+		role = *req.Role
+	}
+
+	toUserID, err := c.UserRepo.GetUserIDWithEmail(toUserEmail)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	if toUserID == fromUserID {
+		return nil, stacktrace.Propagate(ente.ErrBadRequest, "Can not share collection with self")
+	}
+	collection, err := c.CollectionRepo.Get(cID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	if !collection.AllowSharing() {
+		return nil, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("sharing %s is not allowed", collection.Type))
+	}
+	if fromUserID != collection.Owner.ID {
+		return nil, stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	err = c.BillingCtrl.HasActiveSelfOrFamilySubscription(fromUserID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	err = c.CollectionRepo.Share(cID, fromUserID, toUserID, encryptedKey, role, time.Microseconds())
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	sharees, err := c.GetSharees(ctx, cID, fromUserID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return sharees, nil
+}
+
+// UnShare unshares a collection with a user
+func (c *CollectionController) UnShare(ctx *gin.Context, cID int64, fromUserID int64, toUserEmail string) ([]ente.CollectionUser, error) {
+	toUserID, err := c.UserRepo.GetUserIDWithEmail(toUserEmail)
+	if err != nil {
+		return nil, stacktrace.Propagate(ente.ErrNotFound, "")
+	}
+	collection, err := c.CollectionRepo.Get(cID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	isLeavingCollection := toUserID == fromUserID
+	if fromUserID != collection.Owner.ID || isLeavingCollection {
+		return nil, stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	err = c.CollectionRepo.UnShare(cID, toUserID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	err = c.CastRepo.RevokeForGivenUserAndCollection(ctx, cID, toUserID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	sharees, err := c.GetSharees(ctx, cID, fromUserID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return sharees, nil
+}
+
+// Leave removes the user from a collection that is owned by someone else
+func (c *CollectionController) Leave(ctx *gin.Context, cID int64) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	collection, err := c.CollectionRepo.Get(cID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if userID == collection.Owner.ID {
+		return stacktrace.Propagate(ente.ErrPermissionDenied, "can not leave collection owned by self")
+	}
+	sharedCollectionIDs, err := c.CollectionRepo.GetCollectionIDsSharedWithUser(userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if !array.Int64InList(cID, sharedCollectionIDs) {
+		return nil
+	}
+	err = c.CastRepo.RevokeForGivenUserAndCollection(ctx, cID, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	err = c.CollectionRepo.UnShare(cID, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+func (c *CollectionController) UpdateShareeMagicMetadata(ctx *gin.Context, req ente.UpdateCollectionMagicMetadata) error {
+	actorUserId := auth.GetUserID(ctx.Request.Header)
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID: req.ID,
+		ActorUserID:  actorUserId,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if resp.Collection.Owner.ID == actorUserId {
+		return stacktrace.Propagate(ente.NewBadRequestWithMessage("owner can not update sharee magic metadata"), "")
+	}
+	err = c.CollectionRepo.UpdateShareeMetadata(req.ID, resp.Collection.Owner.ID, actorUserId, req.MagicMetadata, time.Microseconds())
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to update sharee magic metadata")
+	}
+	return nil
+}
+
+// ShareURL generates a public auth-token for the given collectionID
+func (c *CollectionController) ShareURL(ctx context.Context, userID int64, req ente.CreatePublicAccessTokenRequest) (
+	ente.PublicURL, error) {
+	collection, err := c.CollectionRepo.Get(req.CollectionID)
+	if err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	if !collection.AllowSharing() {
+		return ente.PublicURL{}, stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("sharing %s is not allowed", collection.Type))
+	}
+	if userID != collection.Owner.ID {
+		return ente.PublicURL{}, stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	err = c.BillingCtrl.HasActiveSelfOrFamilySubscription(userID)
+	if err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	response, err := c.PublicCollectionCtrl.CreateAccessToken(ctx, req)
+	if err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	return response, nil
+}
+
+// UpdateShareURL updates the shared url configuration
+func (c *CollectionController) UpdateShareURL(ctx context.Context, userID int64, req ente.UpdatePublicAccessTokenRequest) (
+	ente.PublicURL, error) {
+	if err := c.verifyOwnership(req.CollectionID, userID); err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	err := c.BillingCtrl.HasActiveSelfOrFamilySubscription(userID)
+	if err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	response, err := c.PublicCollectionCtrl.UpdateSharedUrl(ctx, req)
+	if err != nil {
+		return ente.PublicURL{}, stacktrace.Propagate(err, "")
+	}
+	return response, nil
+}
+
+// DisableSharedURL disables a public auth-token for the given collectionID
+func (c *CollectionController) DisableSharedURL(ctx context.Context, userID int64, cID int64) error {
+	if err := c.verifyOwnership(cID, userID); err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	err := c.PublicCollectionCtrl.Disable(ctx, cID)
+	return stacktrace.Propagate(err, "")
+}
+
+// AddFiles adds files to a collection
+func (c *CollectionController) AddFiles(ctx *gin.Context, userID int64, files []ente.CollectionFileItem, cID int64) error {
+
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   cID,
+		ActorUserID:    userID,
+		IncludeDeleted: false,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to verify collection access")
+	}
+	if !resp.Role.CanAdd() {
+		return stacktrace.Propagate(ente.ErrPermissionDenied, fmt.Sprintf("user %d with role %s can not add files", userID, *resp.Role))
+	}
+
+	collectionOwnerID := resp.Collection.Owner.ID
+	filesOwnerID := userID
+	// Verify that the user owns each file
+	fileIDs := make([]int64, 0)
+	for _, file := range files {
+		fileIDs = append(fileIDs, file.ID)
+	}
+	err = c.AccessCtrl.VerifyFileOwnership(ctx, &access.VerifyFileOwnershipParams{
+		ActorUserId: userID,
+		FileIDs:     fileIDs,
+	})
+
+	if err != nil {
+		return stacktrace.Propagate(err, "Failed to verify fileOwnership")
+	}
+	err = c.CollectionRepo.AddFiles(cID, collectionOwnerID, files, filesOwnerID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// RestoreFiles restores files from trash and adds them to the collection
+func (c *CollectionController) RestoreFiles(ctx *gin.Context, userID int64, cID int64, files []ente.CollectionFileItem) error {
+	_, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   cID,
+		ActorUserID:    userID,
+		IncludeDeleted: false,
+		VerifyOwner:    true,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to verify collection access")
+	}
+	// Verify that the user owns each file
+	for _, file := range files {
+		// todo #perf find owners of all files
+		ownerID, err := c.FileRepo.GetOwnerID(file.ID)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		if ownerID != userID {
+			log.WithFields(log.Fields{
+				"file_id":  file.ID,
+				"owner_id": ownerID,
+				"user_id":  userID,
+			}).Error("invalid ops: can't add file which isn't owned by user")
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+		}
+	}
+	err = c.CollectionRepo.RestoreFiles(ctx, userID, cID, files)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// MoveFiles from one collection to another collection. Both the collections and the files should belong to a
+// single user.
+func (c *CollectionController) MoveFiles(ctx *gin.Context, req ente.MoveFilesRequest) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	_, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   req.FromCollectionID,
+		ActorUserID:    userID,
+		IncludeDeleted: false,
+		VerifyOwner:    true,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to verify if actor owns fromCollection")
+	}
+
+	_, err = c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   req.ToCollectionID,
+		ActorUserID:    userID,
+		IncludeDeleted: false,
+		VerifyOwner:    true,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to verify if actor owns toCollection")
+	}
+
+	// Verify that the user owns each file
+	fileIDs := make([]int64, 0)
+	for _, file := range req.Files {
+		fileIDs = append(fileIDs, file.ID)
+	}
+	err = c.AccessCtrl.VerifyFileOwnership(ctx, &access.VerifyFileOwnershipParams{
+		ActorUserId: userID,
+		FileIDs:     fileIDs,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "Failed to verify fileOwnership")
+	}
+	err = c.CollectionRepo.MoveFiles(ctx, req.ToCollectionID, req.FromCollectionID, req.Files, userID, userID)
+	return stacktrace.Propagate(err, "") // return nil if err is nil
+}
+
+// RemoveFilesV3 removes files from a collection, as long as the files' owners are different from the collection owner
+func (c *CollectionController) RemoveFilesV3(ctx *gin.Context, req ente.RemoveFilesV3Request) error {
+	actorUserID := auth.GetUserID(ctx.Request.Header)
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID: req.CollectionID,
+		ActorUserID:  actorUserID,
+		VerifyOwner:  false,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to verify collection access")
+	}
+	err = c.isRemoveAllowed(ctx, actorUserID, resp.Collection.Owner.ID, req.FileIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "file removal check failed")
+	}
+	err = c.CollectionRepo.RemoveFilesV3(ctx, req.CollectionID, req.FileIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to remove files")
+	}
+	return nil
+}
+
+// isRemoveAllowed verifies whether the given set of files can be removed from the collection
+func (c *CollectionController) isRemoveAllowed(ctx *gin.Context, actorUserID int64, collectionOwnerID int64, fileIDs []int64) error {
+	ownerToFilesMap, err := c.FileRepo.GetOwnerToFileIDsMap(ctx, fileIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to get owner to fileIDs map")
+	}
+	// verify that none of the files belong to the collection owner
+	if _, ok := ownerToFilesMap[collectionOwnerID]; ok {
+		return ente.NewBadRequestWithMessage("can not remove files owned by album owner")
+	}
+
+	if collectionOwnerID != actorUserID {
+		// if files from more than one owner are present, at least some of them are owned by others
+		if len(ownerToFilesMap) > 1 {
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "can not remove files owned by others")
+		}
+		// verify that the user is only trying to remove files owned by them
+		if _, ok := ownerToFilesMap[actorUserID]; !ok {
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "can not remove files owned by others")
+		}
+	}
+	return nil
+}
+
+// GetDiffV2 returns the changes in the user's collections since a timestamp, along with a hasMore bool flag.
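+// Magic metadata of files that are not owned by the caller is stripped from the response.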
+func (c *CollectionController) GetDiffV2(ctx *gin.Context, cID int64, userID int64, sinceTime int64) ([]ente.File, bool, error) { + startTime := t.Now() + reqContextLogger := log.WithFields(log.Fields{ + "user_id": userID, + "collection_id": cID, + "since_time": sinceTime, + "req_id": requestid.Get(ctx), + }) + reqContextLogger.Info("Start") + defer func() { + c.LatencyLogger.WithLabelValues("CollectionController.GetDiffV2"). + Observe(float64(t.Since(startTime).Milliseconds())) + }() + _, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{ + CollectionID: cID, + ActorUserID: userID, + }) + reqContextLogger.Info("Accessible") + if err != nil { + return nil, false, stacktrace.Propagate(err, "failed to verify access") + } + diff, hasMore, err := c.getDiff(cID, sinceTime, CollectionDiffLimit, reqContextLogger) + reqContextLogger.Info("Received diff") + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + // hide private metadata before returning files info in diff + for idx := range diff { + if diff[idx].OwnerID != userID { + diff[idx].MagicMetadata = nil + } + } + reqContextLogger.Info("Function end") + return diff, hasMore, nil +} + +func (c *CollectionController) GetFile(ctx *gin.Context, collectionID int64, fileID int64) (*ente.File, error) { + userID := auth.GetUserID(ctx.Request.Header) + files, err := c.CollectionRepo.GetFile(collectionID, fileID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if len(files) == 0 { + return nil, stacktrace.Propagate(&ente.ErrFileNotFoundInAlbum, "") + } + + file := files[0] + if file.OwnerID != userID { + cIDs, err := c.CollectionRepo.GetCollectionIDsSharedWithUser(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if !array.Int64InList(collectionID, cIDs) { + return nil, stacktrace.Propagate(ente.ErrPermissionDenied, "") + } + } + if file.IsDeleted { + return nil, stacktrace.Propagate(&ente.ErrFileNotFoundInAlbum, "") + } + return &file, nil +} + +// GetPublicDiff returns the changes in the collections since a timestamp, along with hasMore bool flag. +func (c *CollectionController) GetPublicDiff(ctx *gin.Context, sinceTime int64) ([]ente.File, bool, error) { + accessContext := auth.MustGetPublicAccessContext(ctx) + startTime := t.Now() + reqContextLogger := log.WithFields(log.Fields{ + "public_id": accessContext.ID, + "collection_id": accessContext.CollectionID, + "since_time": sinceTime, + "req_id": requestid.Get(ctx), + }) + defer func() { + c.LatencyLogger.WithLabelValues("CollectionController.GetPublicDiff"). + Observe(float64(t.Since(startTime).Milliseconds())) + }() + diff, hasMore, err := c.getDiff(accessContext.CollectionID, sinceTime, CollectionDiffLimit, reqContextLogger) + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + // hide private metadata before returning files info in diff + for idx := range diff { + if diff[idx].MagicMetadata != nil { + diff[idx].MagicMetadata = nil + } + } + return diff, hasMore, nil +} + +// getDiff returns the diff in user's collection since a timestamp, along with hasMore bool flag. +// The function will never return partial result for a version. To maintain this promise, it will not be able to honor +// the limit parameter. 
Based on the db state, compared to the limit, the diff length can be
+// less (case 1), more (case 2), or the same (case 3, 4).
+// Example: Assume we have 11 files with the following versions: v0, v1, v1, v1, v1, v1, v1, v1, v2, v2, v2 (count: 7 v1, 3 v2),
+// and the client has synced up to version v0.
+// case 1: (sinceTime: v0, limit = 8):
+// The method will discard the entries with version v2 and return only the 7 entries with version v1.
+// case 2: (sinceTime: v0, limit = 5):
+// Instead of returning 5 entries with version v1, the method will return all 7 entries with version v1.
+// case 3: (sinceTime: v0, limit = 7):
+// The method will return all 7 entries with version v1.
+// case 4: (sinceTime: v0, limit >= 10):
+// The method will return all 10 entries in the diff.
+func (c *CollectionController) getDiff(cID int64, sinceTime int64, limit int, logger *log.Entry) ([]ente.File, bool, error) {
+	logger.Info("getDiff")
+	// request for limit + 1 files
+	startTime := t.Now()
+	defer func() {
+		c.LatencyLogger.WithLabelValues("CollectionController.getDiff").
+			Observe(float64(t.Since(startTime).Milliseconds()))
+	}()
+	diffLimitPlusOne, err := c.CollectionRepo.GetDiff(cID, sinceTime, limit+1)
+	logger.Info("Got diff from repo")
+	if err != nil {
+		return nil, false, stacktrace.Propagate(err, "")
+	}
+	if len(diffLimitPlusOne) <= limit {
+		// case 4: all files changed after sinceTime are included.
+		return diffLimitPlusOne, false, nil
+	}
+	lastFileVersion := diffLimitPlusOne[limit].UpdationTime
+	filteredDiffs := c.removeFilesWithVersion(diffLimitPlusOne, lastFileVersion)
+	logger.Info("Removed files with out of bounds version")
+	if len(filteredDiffs) > 0 { // case 1 or case 3
+		return filteredDiffs, true, nil
+	}
+	// case 2
+	diff, err := c.CollectionRepo.GetFilesWithVersion(cID, lastFileVersion)
+	logger.Info("Got diff of files with latest file version")
+	if err != nil {
+		return nil, false, stacktrace.Propagate(err, "")
+	}
+	return diff, true, nil
+}
+
+// removeFilesWithVersion returns a filtered list of files after removing all files with the given version.
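+// For example, given files with updation times [v1, v1, v2, v2] and version v2, the returned slice is [v1, v1].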
+// Important: The method assumes that files are sorted in increasing order of File.UpdationTime
+func (c *CollectionController) removeFilesWithVersion(files []ente.File, version int64) []ente.File {
+	var i = len(files) - 1
+	for ; i >= 0; i-- {
+		if files[i].UpdationTime != version {
+			// found the index (from the end) where the file's version differs from the given version
+			break
+		}
+	}
+	return files[0 : i+1]
+}
+
+// GetSharees returns the list of users a collection has been shared with
+func (c *CollectionController) GetSharees(ctx *gin.Context, cID int64, userID int64) ([]ente.CollectionUser, error) {
+	_, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID: cID,
+		ActorUserID:  userID,
+	})
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "Access check failed")
+	}
+	sharees, err := c.CollectionRepo.GetSharees(cID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return sharees, nil
+}
+
+// Trash deletes a given collection and the files exclusive to that collection
+func (c *CollectionController) Trash(ctx *gin.Context, userID int64, cID int64) error {
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   cID,
+		ActorUserID:    userID,
+		IncludeDeleted: true,
+		VerifyOwner:    true,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if !resp.Collection.AllowDelete() {
+		return stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("deleting albums of type %s is not allowed", resp.Collection.Type))
+	}
+	if resp.Collection.IsDeleted {
+		log.WithFields(log.Fields{
+			"c_id":    cID,
+			"user_id": userID,
+		}).Warning("Collection is already deleted")
+		return nil
+	}
+	err = c.PublicCollectionCtrl.Disable(ctx, cID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to disable public share url")
+	}
+	err = c.CollectionRepo.ScheduleDelete(cID, true)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// TrashV3 deletes a given collection. Based on user input (when TrashCollectionV3Request.KeepFiles is FALSE), it will also
+// move all files present in the underlying collection to trash.
+func (c *CollectionController) TrashV3(ctx *gin.Context, req ente.TrashCollectionV3Request) error {
+	if req.KeepFiles == nil {
+		return ente.ErrBadRequest
+	}
+	userID := auth.GetUserID(ctx.Request.Header)
+	cID := req.CollectionID
+	resp, err := c.AccessCtrl.GetCollection(ctx, &access.GetCollectionParams{
+		CollectionID:   cID,
+		ActorUserID:    userID,
+		IncludeDeleted: true,
+		VerifyOwner:    true,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if !resp.Collection.AllowDelete() {
+		return stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("deleting albums of type %s is not allowed", resp.Collection.Type))
+	}
+	if resp.Collection.IsDeleted {
+		log.WithFields(log.Fields{
+			"c_id":    cID,
+			"user_id": userID,
+		}).Warning("Collection is already deleted")
+		return nil
+	}
+
+	if *req.KeepFiles {
+		// Verify that all files from this particular collection have been removed.
+		count, err := c.CollectionRepo.GetCollectionsFilesCount(cID)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		if count != 0 {
+			return stacktrace.Propagate(&ente.ErrCollectionNotEmpty, fmt.Sprintf("Collection file count %d", count))
+		}
+
+	}
+	err = c.PublicCollectionCtrl.Disable(ctx, cID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to disable public share url")
+	}
+	err = c.CastRepo.RevokeTokenForCollection(ctx, cID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to revoke cast token")
+	}
+	// Continue with the current delete flow. This disables sharing for this collection and then queues it up for deletion
+	err = c.CollectionRepo.ScheduleDelete(cID, false)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// Rename updates the collection's name
+func (c *CollectionController) Rename(userID int64, cID int64, encryptedName string, nameDecryptionNonce string) error {
+	if err := c.verifyOwnership(cID, userID); err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	err := c.CollectionRepo.Rename(cID, encryptedName, nameDecryptionNonce)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// UpdateMagicMetadata updates the magic metadata for the given collection
+func (c *CollectionController) UpdateMagicMetadata(ctx *gin.Context, request ente.UpdateCollectionMagicMetadata, isPublicMetadata bool) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	if err := c.verifyOwnership(request.ID, userID); err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	// todo: verify version mismatch later. We are not planning to resync collections on clients,
+	// so ignore that check until then. Ideally, we should enable it after the file size info sync.
+	err := c.CollectionRepo.UpdateMagicMetadata(ctx, request.ID, request.MagicMetadata, isPublicMetadata)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+func (c *CollectionController) HandleAccountDeletion(ctx context.Context, userID int64, logger *log.Entry) error {
+	logger.Info("disabling shared collections with or by the user")
+	sharedCollections, err := c.CollectionRepo.GetAllSharedCollections(ctx, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	logger.Info(fmt.Sprintf("shared collections count: %d", len(sharedCollections)))
+	for _, shareCollection := range sharedCollections {
+		logger.WithField("shared_collection", shareCollection).Info("disable shared collection")
+		err = c.CollectionRepo.UnShare(shareCollection.CollectionID, shareCollection.ToUserID)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	err = c.CastRepo.RevokeTokenForUser(ctx, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to revoke cast token for user")
+	}
+	err = c.PublicCollectionCtrl.HandleAccountDeletion(ctx, userID, logger)
+	return stacktrace.Propagate(err, "")
+}
+
+// Verify that user owns the collection
+func (c *CollectionController) verifyOwnership(cID int64, userID int64) error {
+	collection, err := c.CollectionRepo.Get(cID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if userID != collection.Owner.ID {
+		return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	return nil
+}
diff --git a/server/pkg/controller/collection_cast.go b/server/pkg/controller/collection_cast.go
new file mode 100644
index 000000000..042870263
--- /dev/null
+++ b/server/pkg/controller/collection_cast.go
@@ -0,0 +1,50 @@
+package controller
+
+import (
"github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/stacktrace" + "github.com/gin-contrib/requestid" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" + t "time" +) + +func (c *CollectionController) GetCastCollection(ctx *gin.Context) (*ente.Collection, error) { + castCtx := auth.GetCastCtx(ctx) + collection, err := c.CollectionRepo.Get(castCtx.CollectionID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if collection.IsDeleted { + return nil, stacktrace.Propagate(ente.ErrNotFound, "collection is deleted") + } + return &collection, nil +} + +// GetCastDiff returns the changes in the collections since a timestamp, along with hasMore bool flag. +func (c *CollectionController) GetCastDiff(ctx *gin.Context, sinceTime int64) ([]ente.File, bool, error) { + castCtx := auth.GetCastCtx(ctx) + collectionID := castCtx.CollectionID + startTime := t.Now() + reqContextLogger := log.WithFields(log.Fields{ + "collection_id": collectionID, + "since_time": sinceTime, + "req_id": requestid.Get(ctx), + }) + defer func() { + c.LatencyLogger.WithLabelValues("CollectionController.GetCastDiff"). + Observe(float64(t.Since(startTime).Milliseconds())) + }() + diff, hasMore, err := c.getDiff(collectionID, sinceTime, CollectionDiffLimit, reqContextLogger) + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + // hide private metadata before returning files info in diff + for idx := range diff { + if diff[idx].MagicMetadata != nil { + diff[idx].MagicMetadata = nil + } + } + return diff, hasMore, nil +} diff --git a/server/pkg/controller/commonbilling/controller.go b/server/pkg/controller/commonbilling/controller.go new file mode 100644 index 000000000..1fe74c338 --- /dev/null +++ b/server/pkg/controller/commonbilling/controller.go @@ -0,0 +1,61 @@ +package commonbilling + +import ( + "context" + "fmt" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/storagebonus" + "github.com/ente-io/stacktrace" +) + +type Controller struct { + StorageBonusRepo *storagebonus.Repository + UserRepo *repo.UserRepository + UsageRepo *repo.UsageRepository +} + +func NewController( + storageBonusRepo *storagebonus.Repository, + userRepo *repo.UserRepository, + usageRepo *repo.UsageRepository, +) *Controller { + return &Controller{ + StorageBonusRepo: storageBonusRepo, + UserRepo: userRepo, + UsageRepo: usageRepo, + } +} + +func (c *Controller) CanDowngradeToGivenStorage(newStorage int64, userID int64) (bool, error) { + adminID, err := c.UserRepo.GetFamilyAdminID(userID) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + + if adminID == nil { + bonusStorage, bonErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), userID) + if bonErr != nil { + return false, stacktrace.Propagate(err, "") + } + usage, err := c.UsageRepo.GetUsage(userID) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + if usage > (newStorage + *bonusStorage) { + return false, stacktrace.Propagate(err, fmt.Sprintf("user with %d usage can not downgrade to %d", usage, newStorage)) + } + } else { + bonusStorage, bonErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), *adminID) + if bonErr != nil { + return false, stacktrace.Propagate(err, "") + } + usage, err := c.UsageRepo.StorageForFamilyAdmin(*adminID) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + if usage > (newStorage + *bonusStorage) { + return false, stacktrace.Propagate(err, 
fmt.Sprintf("familyUser with %d usage can not downgrade to %d", usage, newStorage)) + } + } + return true, nil +} diff --git a/server/pkg/controller/data_cleanup/controller.go b/server/pkg/controller/data_cleanup/controller.go new file mode 100644 index 000000000..d3e2c61a6 --- /dev/null +++ b/server/pkg/controller/data_cleanup/controller.go @@ -0,0 +1,201 @@ +package data_cleanup + +import ( + "context" + "errors" + "fmt" + + "github.com/ente-io/museum/ente" + entity "github.com/ente-io/museum/ente/data_cleanup" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/datacleanup" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +type DeleteUserCleanupController struct { + Repo *datacleanup.Repository + UserRepo *repo.UserRepository + CollectionRepo *repo.CollectionRepository + TaskLockRepo *repo.TaskLockRepository + TrashRepo *repo.TrashRepository + UsageRepo *repo.UsageRepository + running bool + HostName string +} + +const ( + // nextStageDelayInHoursOnError is number of afters after which next attempt should be made to process + // current stage. + nextStageDelayInHoursOnError = 2 + + // maximum number of storage check attempt before moving to the next stage. + maxStorageCheckAttempt = 10 +) + +// DeleteDataCron delete trashed files which are in trash since repo.TrashDurationInDays +func (c *DeleteUserCleanupController) DeleteDataCron() { + if c.running { + log.Info("Already running DeleteDataCron, skipping cron") + return + } + c.running = true + defer func() { + c.running = false + }() + + ctx := context.Background() + items, err := c.Repo.GetItemsPendingCompletion(ctx, 100) + if err != nil { + log.WithError(err).Info("Failed to get items for cleanup") + return + } + if len(items) > 0 { + log.WithField("count", len(items)).Info("Found pending items") + for _, item := range items { + c.deleteUserData(ctx, item) + } + } + +} + +func (c *DeleteUserCleanupController) deleteUserData(ctx context.Context, item *entity.DataCleanup) { + logger := log.WithFields(log.Fields{ + "user_id": item.UserID, + "stage": item.Stage, + "attempt_count": item.StageAttemptCount, + "flow": "delete_user_data", + }) + lockName := fmt.Sprintf("delete_user_data-%d", item.UserID) + lockStatus, err := c.TaskLockRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), c.HostName) + if err != nil || !lockStatus { + if err != nil { + logger.Error("error while acquiring lock") + } else { + logger.Warn("lock is already head by another instance") + } + return + } + defer func() { + releaseErr := c.TaskLockRepo.ReleaseLock(lockName) + if releaseErr != nil { + logger.WithError(releaseErr).Error("Error while releasing lock") + } + }() + + logger.Info(fmt.Sprintf("Delete data for stage %s", item.Stage)) + + switch item.Stage { + case entity.Scheduled: + err = c.startCleanup(ctx, item) + case entity.Collection: + err = c.deleteCollections(ctx, item) + case entity.Trash: + err = c.emptyTrash(ctx, item) + case entity.Storage: + err = c.storageCheck(ctx, item) + default: + err = fmt.Errorf("unexpected stage %s", item.Stage) + } + if err != nil { + logger.WithError(err).Error("error while processing data deletion") + err2 := c.Repo.ScheduleNextAttemptAfterNHours(ctx, item.UserID, nextStageDelayInHoursOnError) + if err2 != nil { + logger.Error(err) + return + } + } + +} + +// startClean up will just verify that user +func (c *DeleteUserCleanupController) startCleanup(ctx context.Context, item *entity.DataCleanup) error { + if err := 
+		return stacktrace.Propagate(err, "")
+	}
+	// move to next stage for deleting collection
+	return c.Repo.MoveToNextStage(ctx, item.UserID, entity.Collection, time.Microseconds())
+}
+
+// deleteCollections will schedule all the collections for deletion and queue up the Trash stage to run after 60 min
+func (c *DeleteUserCleanupController) deleteCollections(ctx context.Context, item *entity.DataCleanup) error {
+	collectionsMap, err := c.CollectionRepo.GetCollectionIDsOwnedByUser(item.UserID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	for collectionID, isAlreadyDeleted := range collectionsMap {
+		if !isAlreadyDeleted {
+			// Delete all files in the collection
+			err = c.CollectionRepo.ScheduleDelete(collectionID, false)
+			if err != nil {
+				return stacktrace.Propagate(err, fmt.Sprintf("error while deleting collection %d", collectionID))
+			}
+		}
+	}
+	/* todo: neeraj : verify that all collection delete requests are processed before moving to the empty trash stage.
+	 */
+	return c.Repo.MoveToNextStage(ctx, item.UserID, entity.Trash, time.MicrosecondsAfterMinutes(60))
+}
+
+func (c *DeleteUserCleanupController) emptyTrash(ctx context.Context, item *entity.DataCleanup) error {
+	err := c.TrashRepo.EmptyTrash(ctx, item.UserID, time.Microseconds())
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	// schedule the storage consumed check for the user after 60 min. Trash should ideally get emptied within 60 min
+	return c.Repo.MoveToNextStage(ctx, item.UserID, entity.Storage, time.MicrosecondsAfterMinutes(60))
+}
+
+func (c *DeleteUserCleanupController) completeCleanup(ctx context.Context, item *entity.DataCleanup) error {
+	err := c.Repo.DeleteTableData(ctx, item.UserID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to delete table data for user")
+	}
+	return c.Repo.MoveToNextStage(ctx, item.UserID, entity.Completed, time.Microseconds())
+}
+
+// storageCheck validates that the user's usage is zero after all collections are deleted and trashed files are processed.
+// This check acts as another data-integrity check for our db. If storage is still not zero even after multiple attempts,
+// we mark the clean-up as done.
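+// (maxStorageCheckAttempt above bounds the number of such attempts.)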
+func (c *DeleteUserCleanupController) storageCheck(ctx context.Context, item *entity.DataCleanup) error {
+	usage, err := c.UsageRepo.GetUsage(item.UserID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if usage != 0 {
+		// check if trash still has entries
+		timeStamp, err2 := c.TrashRepo.GetTimeStampForLatestNonDeletedEntry(item.UserID)
+		if err2 != nil {
+			return stacktrace.Propagate(err2, "failed to fetch timestamp")
+		}
+		if timeStamp != nil {
+			// trash still has entries, try emptying it again
+			log.WithFields(log.Fields{
+				"user_id":   item.UserID,
+				"flow":      "delete_user_data",
+				"timeStamp": timeStamp,
+			}).Info("trash is not empty")
+			err = c.TrashRepo.EmptyTrash(ctx, item.UserID, *timeStamp)
+			if err != nil {
+				return stacktrace.Propagate(err, "")
+			}
+		} else if item.StageAttemptCount >= maxStorageCheckAttempt {
+			// Note: if storage is still not zero after maxStorageCheckAttempt attempts and trash is empty, mark the clean-up as done
+			return c.completeCleanup(ctx, item)
+		}
+		return fmt.Errorf("storage consumed is not zero: %d", usage)
+	}
+	return c.completeCleanup(ctx, item)
+}
+
+func (c *DeleteUserCleanupController) isDeleted(item *entity.DataCleanup) error {
+	_, err := c.UserRepo.Get(item.UserID)
+	if err == nil {
+		return stacktrace.Propagate(ente.NewBadRequestWithMessage("User ID is linked to undeleted account"), "")
+	}
+	if !errors.Is(err, ente.ErrUserDeleted) {
+		return stacktrace.Propagate(err, "error while getting the user")
+	}
+	return nil
+}
diff --git a/server/pkg/controller/discord/discord.go b/server/pkg/controller/discord/discord.go
new file mode 100644
index 000000000..2c07fe404
--- /dev/null
+++ b/server/pkg/controller/discord/discord.go
@@ -0,0 +1,122 @@
+package discord
+
+import (
+	"fmt"
+	"time"
+
+	"github.com/bwmarrin/discordgo"
+	"github.com/ente-io/museum/pkg/repo"
+	t "github.com/ente-io/museum/pkg/utils/time"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+)
+
+// DiscordController is a devops aid. If Discord credentials are configured,
+// then it will send notifications to Discord channels on specified events.
+type DiscordController struct {
+	MonaLisa    *discordgo.Session
+	ChaChing    *discordgo.Session
+	HostName    string
+	Environment string
+	UserRepo    *repo.UserRepository
+}
+
+func NewDiscordController(userRepo *repo.UserRepository, hostName string, environment string) *DiscordController {
+	return &DiscordController{
+		MonaLisa:    createBot("Mona Lisa", "discord.bot.mona-lisa.token"),
+		ChaChing:    createBot("Cha Ching", "discord.bot.cha-ching.token"),
+		HostName:    hostName,
+		Environment: environment,
+		UserRepo:    userRepo,
+	}
+}
+
+func createBot(name string, tokenConfigKey string) *discordgo.Session {
+	silent := viper.GetBool("internal.silent")
+	if silent {
+		return nil
+	}
+
+	token := viper.GetString(tokenConfigKey)
+	if token == "" {
+		return nil
+	}
+
+	session, err := discordgo.New("Bot " + token)
+	if err != nil {
+		log.Warnf("Could not create Discord bot %s: %s", name, err)
+	}
+
+	return session
+}
+
+// The actual send
+func (c *DiscordController) sendMessage(bot *discordgo.Session, channel string, message string) {
+	if bot == nil {
+		log.Infof("Skipping sending Discord message: %s", message)
+		return
+	}
+
+	_, err := bot.ChannelMessageSend(channel, message)
+	if err != nil {
+		log.Warnf("Could not send message {%s} to Discord channel {%s} due to error {%s}", message, channel, err)
+	}
+}
+
+// Send a message related to server status or important events/errors.
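+// The channel ID is read from the `discord.bot.mona-lisa.channel` config key. A hypothetical
+// configuration sketch, matching the viper keys used in this file (values are placeholders):
+//
+//	discord:
+//	  bot:
+//	    mona-lisa:
+//	      token: "..."
+//	      channel: "..."
+//	    cha-ching:
+//	      token: "..."
+//	      channel: "..."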
+func (c *DiscordController) Notify(message string) { + c.sendMessage(c.MonaLisa, viper.GetString("discord.bot.mona-lisa.channel"), message) +} + +// Send a message related to subscriptions. +func (c *DiscordController) NotifyNewSub(userID int64, paymentProvider string, amount string) { + message := fmt.Sprintf("New subscriber via `%s`, after %s of signing up! 🫂 (%s)", + paymentProvider, c.getTimeSinceSignUp(userID), amount) + c.sendMessage(c.ChaChing, viper.GetString("discord.bot.cha-ching.channel"), message) +} + +// Send a message related to subscriptions. +func (c *DiscordController) NotifyBlackFridayUser(userID int64, amount string) { + message := fmt.Sprintf("BlackFriday subscription purchased after %s of signing up! 🫂 (%s)", + c.getTimeSinceSignUp(userID), amount) + c.sendMessage(c.ChaChing, viper.GetString("discord.bot.cha-ching.channel"), message) +} + +// Convenience wrappers over the primitive notify types. +// +// By keeping them separate we later allow them to be routed easily to different +// Discord channels. + +func (c *DiscordController) NotifyStartup() { + c.Notify(c.HostName + " has taken off 🚀") +} + +func (c *DiscordController) NotifyShutdown() { + c.Notify(c.HostName + " is down ☠️") +} + +func (c *DiscordController) NotifyAdminAction(message string) { + c.Notify(message) +} + +func (c *DiscordController) NotifyAccountDelete(userID int64, paymentProvider string, productID string) { + message := fmt.Sprintf("User on %s (%s) initiated delete after using us for %s", + paymentProvider, productID, c.getTimeSinceSignUp(userID)) + c.Notify(message) +} + +func (c *DiscordController) NotifyPotentialAbuse(message string) { + c.Notify(message) +} + +func (c *DiscordController) getTimeSinceSignUp(userID int64) string { + timeSinceSignUp := "unknown time" + user, err := c.UserRepo.GetUserByIDInternal(userID) + if err != nil { + log.Error(err) + } else { + since := time.Since(time.UnixMicro(user.CreationTime)) + timeSinceSignUp = t.DaysOrHoursOrMinutes(since) + } + return timeSinceSignUp +} diff --git a/server/pkg/controller/email/email_notification.go b/server/pkg/controller/email/email_notification.go new file mode 100644 index 000000000..01b946683 --- /dev/null +++ b/server/pkg/controller/email/email_notification.go @@ -0,0 +1,158 @@ +package email + +import ( + "fmt" + "strconv" + + "github.com/avct/uasurfer" + "github.com/ente-io/museum/pkg/controller/lock" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/museum/pkg/utils/time" + log "github.com/sirupsen/logrus" +) + +const ( + WebAppFirstUploadTemplate = "web_app_first_upload.html" + MobileAppFirstUploadTemplate = "mobile_app_first_upload.html" + FirstUploadEmailSubject = "Congratulations! 🎉" + StorageLimitExceededMailLock = "storage_limit_exceeded_mail_lock" + StorageLimitExceededTemplateID = "storage_limit_exceeded" + StorageLimitExceededTemplate = "storage_limit_exceeded.html" + FilesCollectedTemplate = "files_collected.html" + FilesCollectedTemplateID = "files_collected" + FilesCollectedSubject = "You've got photos!" + SubscriptionUpgradedTemplate = "subscription_upgraded.html" + SubscriptionUpgradedSubject = "Thank you for choosing ente!" + FilesCollectedMuteDurationInMinutes = 10 + StorageLimitExceededSubject = "[Alert] You have exceeded your storage limit" + ReferralSuccessfulTemplate = "successful_referral.html" + ReferralSuccessfulSubject = "You've earned 10 GB on ente! 
🎁" +) + +type EmailNotificationController struct { + UserRepo *repo.UserRepository + LockController *lock.LockController + NotificationHistoryRepo *repo.NotificationHistoryRepository + isSendingStorageLimitExceededMails bool +} + +func (c *EmailNotificationController) OnFirstFileUpload(userID int64, userAgent string) { + user, err := c.UserRepo.Get(userID) + if err != nil { + return + } + os := getOSFromUA(userAgent) + template := WebAppFirstUploadTemplate + if os == uasurfer.OSAndroid || os == uasurfer.OSiOS { + template = MobileAppFirstUploadTemplate + } + err = email.SendTemplatedEmail([]string{user.Email}, "team@ente.io", "team@ente.io", FirstUploadEmailSubject, template, nil, nil) + if err != nil { + log.Error("Error sending first upload email ", err) + } +} + +func getOSFromUA(ua string) uasurfer.OSName { + return uasurfer.Parse(ua).OS.Name +} + +func (c *EmailNotificationController) OnSuccessfulReferral(userID int64) { + user, err := c.UserRepo.Get(userID) + if err != nil { + return + } + err = email.SendTemplatedEmail([]string{user.Email}, "team@ente.io", "team@ente.io", ReferralSuccessfulSubject, ReferralSuccessfulTemplate, nil, nil) + if err != nil { + log.Error("Error sending first upload email ", err) + } +} + +func (c *EmailNotificationController) OnFilesCollected(userID int64) { + user, err := c.UserRepo.Get(userID) + if err != nil { + return + } + lastNotificationTime, err := c.NotificationHistoryRepo.GetLastNotificationTime(userID, FilesCollectedTemplateID) + logger := log.WithFields(log.Fields{ + "user_id": userID, + }) + if err != nil { + logger.Error("Could not fetch last notification time", err) + return + } + if lastNotificationTime > time.MicrosecondsAfterMinutes(-FilesCollectedMuteDurationInMinutes) { + logger.Info("Not notifying user about a collected file") + return + } + lockName := "files_collected_" + strconv.FormatInt(userID, 10) + lockStatus := c.LockController.TryLock(lockName, time.MicrosecondsAfterMinutes(FilesCollectedMuteDurationInMinutes)) + if !lockStatus { + log.Error("Could not acquire lock to send file collected mails") + return + } + defer c.LockController.ReleaseLock(lockName) + logger.Info("Notifying about files collected") + err = email.SendTemplatedEmail([]string{user.Email}, "team@ente.io", "team@ente.io", FilesCollectedSubject, FilesCollectedTemplate, nil, nil) + if err != nil { + log.Error("Error sending files collected email ", err) + } + c.NotificationHistoryRepo.SetLastNotificationTimeToNow(userID, FilesCollectedTemplateID) +} + +func (c *EmailNotificationController) OnAccountUpgrade(userID int64) { + user, err := c.UserRepo.Get(userID) + if err != nil { + log.Error("Could not find user to email", err) + return + } + log.Info(fmt.Sprintf("Emailing on account upgrade %d", user.ID)) + err = email.SendTemplatedEmail([]string{user.Email}, "team@ente.io", "team@ente.io", SubscriptionUpgradedSubject, SubscriptionUpgradedTemplate, nil, nil) + if err != nil { + log.Error("Error sending files collected email ", err) + } +} + +func (c *EmailNotificationController) SendStorageLimitExceededMails() { + if c.isSendingStorageLimitExceededMails { + log.Info("Skipping sending storage limit exceeded mails as another instance is still running") + return + } + c.setStorageLimitExceededMailerJobStatus(true) + defer c.setStorageLimitExceededMailerJobStatus(false) + lockStatus := c.LockController.TryLock(StorageLimitExceededMailLock, time.MicrosecondsAfterHours(24)) + if !lockStatus { + log.Error("Could not acquire lock to send storage limit exceeded mails") + 
return + } + defer c.LockController.ReleaseLock(StorageLimitExceededMailLock) + users, err := c.UserRepo.GetUsersWithIndividualPlanWhoHaveExceededStorageQuota() + if err != nil { + log.Error("Error while fetching user list", err) + return + } + for _, u := range users { + lastNotificationTime, err := c.NotificationHistoryRepo.GetLastNotificationTime(u.ID, StorageLimitExceededTemplateID) + logger := log.WithFields(log.Fields{ + "user_id": u.ID, + }) + if err != nil { + logger.Error("Could not fetch last notification time", err) + continue + } + if lastNotificationTime > 0 { + continue + } + logger.Info("Alerting about storage limit exceeded") + err = email.SendTemplatedEmail([]string{u.Email}, "team@ente.io", "team@ente.io", StorageLimitExceededSubject, StorageLimitExceededTemplate, nil, nil) + if err != nil { + logger.Info("Error notifying", err) + continue + } + c.NotificationHistoryRepo.SetLastNotificationTimeToNow(u.ID, StorageLimitExceededTemplateID) + } +} + +func (c *EmailNotificationController) setStorageLimitExceededMailerJobStatus(isSending bool) { + c.isSendingStorageLimitExceededMails = isSending +} diff --git a/server/pkg/controller/embedding/controller.go b/server/pkg/controller/embedding/controller.go new file mode 100644 index 000000000..ce086aadb --- /dev/null +++ b/server/pkg/controller/embedding/controller.go @@ -0,0 +1,273 @@ +package embedding + +import ( + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/controller/access" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/embedding" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/network" + "github.com/ente-io/museum/pkg/utils/s3config" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +type Controller struct { + Repo *embedding.Repository + AccessCtrl access.Controller + ObjectCleanupController *controller.ObjectCleanupController + S3Config *s3config.S3Config + QueueRepo *repo.QueueRepository + TaskLockingRepo *repo.TaskLockRepository + FileRepo *repo.FileRepository + CollectionRepo *repo.CollectionRepository + HostName string + cleanupCronRunning bool +} + +func (c *Controller) InsertOrUpdate(ctx *gin.Context, req ente.InsertOrUpdateEmbeddingRequest) (*ente.Embedding, error) { + userID := auth.GetUserID(ctx.Request.Header) + + err := c.AccessCtrl.VerifyFileOwnership(ctx, &access.VerifyFileOwnershipParams{ + ActorUserId: userID, + FileIDs: []int64{req.FileID}, + }) + + if err != nil { + return nil, stacktrace.Propagate(err, "User does not own file") + } + + count, err := c.CollectionRepo.GetCollectionCount(req.FileID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if count < 1 { + return nil, stacktrace.Propagate(ente.ErrNotFound, "") + } + + obj := ente.EmbeddingObject{ + Version: 1, + EncryptedEmbedding: req.EncryptedEmbedding, + DecryptionHeader: req.DecryptionHeader, + Client: network.GetPrettyUA(ctx.GetHeader("User-Agent")) + "/" + ctx.GetHeader("X-Client-Version"), + } + err = c.uploadObject(obj, c.getObjectKey(userID, req.FileID, req.Model)) + if err != nil { + log.Error(err) + return nil, stacktrace.Propagate(err, "") + } + embedding, err := c.Repo.InsertOrUpdate(ctx, userID, 
req) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return &embedding, nil +} + +func (c *Controller) GetDiff(ctx *gin.Context, req ente.GetEmbeddingDiffRequest) ([]ente.Embedding, error) { + userID := auth.GetUserID(ctx.Request.Header) + + if req.Model == "" { + req.Model = ente.GgmlClip + } + + embeddings, err := c.Repo.GetDiff(ctx, userID, req.Model, *req.SinceTime, req.Limit) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + // Collect object keys for embeddings with missing data + var objectKeys []string + for i := range embeddings { + if embeddings[i].EncryptedEmbedding == "" { + objectKey := c.getObjectKey(userID, embeddings[i].FileID, embeddings[i].Model) + objectKeys = append(objectKeys, objectKey) + } + } + + // Fetch missing embeddings in parallel + if len(objectKeys) > 0 { + embeddingObjects, err := c.getEmbeddingObjectsParallel(objectKeys) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + // Populate missing data in embeddings from fetched objects + for i, obj := range embeddingObjects { + for j := range embeddings { + if embeddings[j].EncryptedEmbedding == "" && c.getObjectKey(userID, embeddings[j].FileID, embeddings[j].Model) == objectKeys[i] { + embeddings[j].EncryptedEmbedding = obj.EncryptedEmbedding + embeddings[j].DecryptionHeader = obj.DecryptionHeader + } + } + } + } + + return embeddings, nil +} + +func (c *Controller) DeleteAll(ctx *gin.Context) error { + userID := auth.GetUserID(ctx.Request.Header) + + err := c.Repo.DeleteAll(ctx, userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// CleanupDeletedEmbeddings clears all embeddings for deleted files from the object store +func (c *Controller) CleanupDeletedEmbeddings() { + log.Info("Cleaning up deleted embeddings") + if c.cleanupCronRunning { + log.Info("Skipping CleanupDeletedEmbeddings cron run as another instance is still running") + return + } + c.cleanupCronRunning = true + defer func() { + c.cleanupCronRunning = false + }() + items, err := c.QueueRepo.GetItemsReadyForDeletion(repo.DeleteEmbeddingsQueue, 200) + if err != nil { + log.WithError(err).Error("Failed to fetch items from queue") + return + } + for _, i := range items { + c.deleteEmbedding(i) + } +} + +func (c *Controller) deleteEmbedding(qItem repo.QueueItem) { + lockName := fmt.Sprintf("Embedding:%s", qItem.Item) + lockStatus, err := c.TaskLockingRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), c.HostName) + ctxLogger := log.WithField("item", qItem.Item).WithField("queue_id", qItem.Id) + if err != nil || !lockStatus { + ctxLogger.Warn("unable to acquire lock") + return + } + defer func() { + err = c.TaskLockingRepo.ReleaseLock(lockName) + if err != nil { + ctxLogger.Errorf("Error while releasing lock %s", err) + } + }() + ctxLogger.Info("Deleting all embeddings") + + fileID, _ := strconv.ParseInt(qItem.Item, 10, 64) + ownerID, err := c.FileRepo.GetOwnerID(fileID) + if err != nil { + ctxLogger.WithError(err).Error("Failed to fetch ownerID") + return + } + prefix := c.getEmbeddingObjectPrefix(ownerID, fileID) + + err = c.ObjectCleanupController.DeleteAllObjectsWithPrefix(prefix, c.S3Config.GetHotDataCenter()) + if err != nil { + ctxLogger.WithError(err).Error("Failed to delete all objects") + return + } + + err = c.Repo.Delete(fileID) + if err != nil { + ctxLogger.WithError(err).Error("Failed to remove from db") + return + } + + err = c.QueueRepo.DeleteItem(repo.DeleteEmbeddingsQueue, qItem.Item) + if err != nil { + 
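+		// The item stays in the queue, so a subsequent
+		// CleanupDeletedEmbeddings run will retry this deletion.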
ctxLogger.WithError(err).Error("Failed to remove item from the queue") + return + } + + ctxLogger.Info("Successfully deleted all embeddings") +} + +func (c *Controller) getObjectKey(userID int64, fileID int64, model string) string { + return c.getEmbeddingObjectPrefix(userID, fileID) + model + ".json" +} + +func (c *Controller) getEmbeddingObjectPrefix(userID int64, fileID int64) string { + return strconv.FormatInt(userID, 10) + "/ml-data/" + strconv.FormatInt(fileID, 10) + "/" +} + +func (c *Controller) uploadObject(obj ente.EmbeddingObject, key string) error { + embeddingObj, _ := json.Marshal(obj) + uploader := s3manager.NewUploaderWithClient(c.S3Config.GetHotS3Client()) + up := s3manager.UploadInput{ + Bucket: c.S3Config.GetHotBucket(), + Key: &key, + Body: strings.NewReader(string(embeddingObj)), + } + result, err := uploader.Upload(&up) + if err != nil { + log.Error(err) + return stacktrace.Propagate(err, "") + } + log.Infof("Uploaded to bucket %s", result.Location) + return nil +} + +var globalFetchSemaphore = make(chan struct{}, 300) + +func (c *Controller) getEmbeddingObjectsParallel(objectKeys []string) ([]ente.EmbeddingObject, error) { + var wg sync.WaitGroup + var errs []error + embeddingObjects := make([]ente.EmbeddingObject, len(objectKeys)) + downloader := s3manager.NewDownloaderWithClient(c.S3Config.GetHotS3Client()) + + for i, objectKey := range objectKeys { + wg.Add(1) + globalFetchSemaphore <- struct{}{} // Acquire from global semaphore + go func(i int, objectKey string) { + defer wg.Done() + defer func() { <-globalFetchSemaphore }() // Release back to global semaphore + + obj, err := c.getEmbeddingObject(objectKey, downloader) + if err != nil { + errs = append(errs, err) + log.Error("error fetching embedding object: "+objectKey, err) + } else { + embeddingObjects[i] = obj + } + }(i, objectKey) + } + + wg.Wait() + + if len(errs) > 0 { + return nil, stacktrace.Propagate(errors.New("failed to fetch some objects"), "") + } + + return embeddingObjects, nil +} + +func (c *Controller) getEmbeddingObject(objectKey string, downloader *s3manager.Downloader) (ente.EmbeddingObject, error) { + var obj ente.EmbeddingObject + buff := &aws.WriteAtBuffer{} + _, err := downloader.Download(buff, &s3.GetObjectInput{ + Bucket: c.S3Config.GetHotBucket(), + Key: &objectKey, + }) + if err != nil { + log.Error(err) + return obj, stacktrace.Propagate(err, "") + } + err = json.Unmarshal(buff.Bytes(), &obj) + if err != nil { + log.Error(err) + return obj, stacktrace.Propagate(err, "") + } + return obj, nil +} diff --git a/server/pkg/controller/family/admin.go b/server/pkg/controller/family/admin.go new file mode 100644 index 000000000..aba7f428b --- /dev/null +++ b/server/pkg/controller/family/admin.go @@ -0,0 +1,251 @@ +package family + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/billing" + emailUtil "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +const ( + InviteTokenLength = 32 + InviteTemplate = "family_invited.html" + AcceptedTemplate = "family_accepted.html" + LeftTemplate = "family_left.html" + RemovedTemplate = "family_removed.html" + + HappyHeaderImage = 
"iVBORw0KGgoAAAANSUhEUgAAAN4AAADeCAYAAABSZ763AAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAABxrSURBVHgB7Z1tkFRldscP3fPag8zA6PAiyzRYhhhXwXwxSSXSSJX7Bd++WLu+AbWrJrUmYpnkw+IWYMlaaq3gbtZUSSoOuruxdqsUZL9ICmhxdbMfsg66SUkstUGQ12EGZqanGaabnP/te2f6ve/tvi/Pvff8qi63u6en6el+/vec55zznGcWCZ5y5cqVnsuXL8dx4HY0Go1HIpF+/CyXy8VnzZrVg8eN5+J+jZdL4R9+zgg/dwRnHPw6R/X7g/z6I62trSk8ToJnzCLBFSYmJuIsgJU84OM8+FewEFYawiIPMISoC/M93OaHU52dnSkSHEeE5wCwTOl0OsGDOQHrxQM74ZXArGIIkm++x0eyo6NjUKyj/YjwbKBEaKtgzShAQIiwivx3JWOxWFKE2DwivAaB68gD8R4ehHfz3QSFiyT/7XtaWlqSbW1tgyRYRoRnARZbgk+r+FjPR5wEkNJFOCAiNI8Irw6wbHxaRyI2M2giZC9ghwRpaiPCqwDmbCy49SF1I+0iyccuFuAACWWI8AoYGxtbycGR9XxznV+ikD4gRXkRbhUrOIMIj6bnbptJrJvTJCkvwCSFnFALD+4k5edvCRLcJEV5AQ5QSAml8HTBwcLFSfCSFIVUgKESnghOWVIUMgGGQngyh/MNKQqJAAMtPD0H9xqJ4PzGAAU8ChpI4SEPd+nSpc183kiCb+HvbwendV4OogADJzwW3D3ZbPY1ycMFhhQF0P0MjPDErQw8AxQg9zNCASCTyTzBbslHJKILMijh+yidTgdi+uBriydWLrQk+djgZ+vnW4s3OTm5TqxcaEnwcVDPy/oS31k8iVgKhSDyyZZvq99WxftKeLpreZCk8kQoJsXHaj+5nr5xNQtcyzgJQjFxvwVefGHxOGq5XVxLwQyRSGRLe3v7VlIcpYWH+RyL7m2SAIrG+2N/pHdGfk+Hxj6hC9lx7bH+tj7teKh3Df3N7G+SoJEkxaOeygpP5nMzQHCPHH2Zjk6eqfk8CPDphd+hB+fdToLa8z4lhYcWDNFoFJYuTiFn28n/oGdPvWnpd55e8G3axAIUKMXj6F4Vu58pF1xBEIX9dLF01JjoAH4HvysQ2uYfHB8fv4cUQymLB9Fls9kBEujn5w9o7mUz7Lt+m8z7dDhe8GQsFttBiqCMxUNSXEQ3w7M2WKxmhRskON2wHWOMFEEJ4eEDYZdgCwkasHb1AilmwGvgtYQ8GGOqiM9z4YnoynnWxvnZszLXK0IV8XkqPBFdOXZZOwO8FtIRwgwqiM8z4YnoKuOEhRKrV47X4vNEeFi4KqIrx25rZ3CILZ5YvXIwBhFJJw9wXXjoiYKlHCSU8cbQfnIKsXqVQSTdC/G5KjxUpKAREQllwCodctAqidWrDlu+HSw+V3fxdU14qL1EGZh0/6qMG5UmYvUqg2J8Nghv6/XBruCK8KTguTaY1x1ywRqJ1atJnI+DECG5gFsWD+5lnISKbDtpvR6zUcTq1SSuL0NzHMeFp4dsEyRUBNbujfPOBVVKEatXl0Q6nd5ODuOo8CRtUB83rZ3BT8+8Q0J1OA6x0elIp2OrEzCvQ48UCaZUB9buT//nEfKCT2/cqS2cFSqDrmU8fm9xaiGtIxZPn6AeFNHVxgtrN/N/y1yvFsYYdirY4ojw9HldnISquD23K+WN8wdoRO/bIlQlzp6bI2VltgsP3X2lI1h9vLR2Bj87u5eE2jg137N1jif5OnN4ObcrpDvapc31evgsVMeJ+Z7dFk/ydSZQwdoBtAgUq1cffZ5na6mjbcJD6oAkX1eXfJXKJ6QKSC3IXM8UCTs7VdsiPLiYkq8zx8+H9juy9KdRxOqZJxKJbLarntMW4UF0kjqoDwb5Gwr2QBGrZw47Xc6mhYcoJl8JPFlM6Df2Xvi9UtbOQKyeJRJ29OlsKqopUUxrIJKpovCARDjNgyhne3v70mb25GvK4vF/jIBKnIS6ONXWwS7E6pkHLmezifWGLZ5u7b4kwRQqWzsDWL1TN/+SBNMsbTS317DFkyimeVS3dgawetIA1xINB1oaEp4EVKzhp8WnslDWEgnWQoIaoFGLp0wPetXxi7UzwHtF9FUwTUNasCw8WDuSgIopkBvzowX5p+P/Jnk98yR0TViiEYsn1s4kj5nYxVVF8J7/mcUnmMayJiwJT6ydOWAtHmXRveNjlw0VNo/KNl9miVu1epbSCfziSB/ESagKGgn9I1uLjyeCkWlBe4id/U/IBpf1SXFqYanZJ5sWnq5o6QJdAqzbMX1HnndG/suV/phecBsL76HeNXQzj62bzY+vsLGBxTdg5olWhBdaa4c5D3JcsGK4nbp0WruNx/w4h7MDlJZBgEi6r4gt027jsZv0c0hJsvBWm3miKeHpuYqDFGAgIIjpmCasM3x/RlwS4bMGhLeEXdR+7ZhP8fa+aZGGwFquZvEl6z3JlPAymQy6LSXIxxguYaHVwv2j+iG4R78uSogz3j4/aNbSlNWrKzw/1WQaVgtW6nD6Sxbb2LQVE6vlD4JgLaPR6C1tbW2DtZ5TV3jj4+MDqpaHQUw/O/OO41tcCepgBHkQZVW1IS97hy/HYrGabSLMWDwlgyr/woLbdupN31mytqEsLf3xkHb7yI+kk3OjQHSP991Fj19zJ6mGmfV6LbVeQNWEORK7KrZQqMfV+8ep7zdjFE3n6HJvlLyi/1+Htfdw5s6raOxP2siPYFqB0rZjPFd/YfH3SCX09Xrr+WbVnY9rCo8Vu45fhFQCH7bfRIdBPp8F17s/b53PremiM2tnk1d0fnWZWnXLO87C87MAf6ov3lVNfKydu6mG8Kq6mioGVVDp/4gPy5iW/+CM5mLmYhE6xYIbWuNt5A7vped3EzTvw7QmQHDyvjnaBcGv7Lt+m4rVNVUXylat1VSxDbtf14rh6gbX8rOnr/ZcdGCS3wss7hdP9dJptnaZb7SQ31H0gry+2g9qWTylgipYI3bfFz8iQaiGglavav1mRYs3OTm5khQLquwdkcWZQm0UXA0Sr7ZCvaLwpqam1pNioIRLEGrx/qg6rfELSFR6sKLw9IiMUkhZl1APRXO6FYtPymbVcDOz2WycBMdBmgG5vbkcYTSii5nFrTTBwQ6E+CcbzPU59bpCQ8DdjJdGN8uEx25mgi0eqQYqFYJk9eZyKH/hr0c1kRTScfyydsw5fEmLPFoN8Tv1un5A4RIytHwvyumVuZqRSEQ5NxPcHFtGQQHWaPGuC2XiKAQ/W/iriyykCfL6df2CqmOk0tStSHjDw8M9qi7/ubP7VgoCSF6jbMwsC399saaQnH5dP3GXumMkoe80NE2R8Nra2hKkKKhKvy0AfT/6fjNqacBr87UD9YMGTr2uX8DYULkvTDqdThTeLxIem8QEKcyLi7/n+4WSnccuk1V6TLiFTr2uX3i1/wlSmVJtlc7xVpHCYC
GkasWwVuk4PkVWadMjk168rh/ABVnVwEoBRdqaFh7md6zKlaQ4D8273ffiE+wDlk7FNXmlQFuF87xp4XV0dCgvOoO/5w9addeiGhdXdmirFKyQ+UZr3edYXd+H94D34mcwBnAh9guF87zCPF6CfITxgfut2/HRv5urnZFTm31kUksBtNZx+SYW1189MPxXndS3t3ZUc3x5G11c0UEXWHCXfZxExzwfXo+fRAf0ed5u7bbxoF87iaGZ0bc+2+TrZkZd/zdJ8/eOaudSYJmwnKhetQmilMs3na0Y2YTgTq+9Slv06ncgunev3+bLNoGsrz2xWEzbP31aeGwGh1mRPeRDgiA+gBIvCLDQAn593xzTa/jw+4sHZtp8wKp9tb4nEIIDfhYdQA8WntJpLo8mvCBsqwzxYb2e38vK8onwUeoevEQnWHQjf9lp6feNvi4Q2/F13ZS1OJ9UFUQtf7XsB0FoiKutSjeEl6AAdIqG6GD5glDTCZexUdE087v1gEVG2whcELB63Q0gOlg6H6QM6pLL5e7t6urarX077Hv6JqJZCy++IFgo9FTBYSfNCMdJK4fXNsrTIEKnCZLoALubcZyNbyhBAcHtL2rJK8PaQFRvPYczYFUDIqgA4rt6v3Pz6qCJDkSj0RU4a8KLRCLdFCDc+sJwxe/ktACCGGgcFHRwgVn246GilQ1Y6dDxlfVytXoEUXQgm81q3qUmPPY7A+FqFuL0F2e0yANIRGc7g2vzZnOaY/GuEc2dRsoDKQ7M74w1fYtYfHaCAEoQRQfYyMVxnoVSMQ5xDlNAwQYmCLgctnmH1rm/S3Po/sL0fcx9Lq5o10L/EyYqTfwABNdXkF+E4M7y3zd0e0z7ewtzh7D4dqQtDNEFeY89pBRaOMISZ/NHQaVbz/3YLT7D3Rrm6B6sHwYn8mg40Gbh3JoYJ67bfddmAWLrOnKJeg+kp5PxpYIzwG1EN9EhO5puvuN4GEQHLl++HJ8Vhk0nASwfmp7utakF3E2PndTO/7t9fkGkb1QrAytMgMMKwBXFWVVLaIitsEcLMErMcHGpFSlF+RsuNs2AHYBeuPa7odhNFimFFqQSVOyxYjewfEjAPnr0J/TG+f3ULEatozEgYdmOr8sX/swZzNCcwxnNKsISGq4angMB4kDhs1dCxN4JxvvCUVhmBut2noVmXCzMYIfoXl3yDxQWeJ7XA4u3hW9vphBhl/jqgQGNgQ0RllpCANFmFrdoAkzzIMd9FETblYeDFcZ76Dw+Re0sNggO6/ZK6zlxEUHhtBWx2UXYRKezFcIboCq9/4IM9tZzey8GY0UCxAj3LlKlVQOEd3lelM+zNCuJ+7lY3ivJdkamlxW1Ds0sfm09lxc1xIYjMnGlaisICG1Mt7perlR4euF3aNOCb1PYYIs3AFezJwyuZinGF+6m+OCS5QMv+XkMBIIcGHKBHV9N8X0cOU0whmiamfFAoJO9sKKtmsgMV1eF+s2wis6gBclz1fbAcwsvxFcIhICjdEEqRGe0cjDcxehEXoiR9IwlK7WEWU1o+dfMcV5R1QLpsIuOgyvxFv4nlBbPwGvxVQKCMeZawekDlifsogPwMiN+XYNnJxgIGBCCs4jo8kBz/t+R0CZUtHxBAp3A/NCUyC0wCYiToAHxPd53Fwn2AksnopsBFg/phHBGVqqACpdbP90o24LZBAqdP71xJwnFBKMvgI2gwkWsnn1skrlzRUR4FUDbuDDUDLpBUDabsRsRXgVg9YK4FsxtsJGIXMAqI8KrQpD24/OK/na5eFVDhCcIHoBazRFJopczMqVezUjs0wzNe3uY2o/lC6xRizn25zEauncuTV0tKVk/0YLutnwW4ZVwIWt+d1U36N09zKIbKXoM4pvz2zGa/Yc0nX1gHl38a3f6XJrl47SveyQ7SUpczSocGvsjqUIl0RUCAc7feY5631ardU5KcqFVicDVJKEIuxsjNcP8nWdriq6QebtHlBIfihGkEKEcaC7CiPBKOKbAYIEVW/zcSc2VtIJq4ntfIc9BFTC9g8U7SkIRH3ts8VrPTWmi6+RgSiNAfLCU1Va4u4lK3oMqsOYuyByvAodGPyGvgOiuZdEhctkMsJQQb8s563uj24m4muVEo9FhCC9FQhFeDRZDdK02iQXi9Vp8H6e/IKGYXC53VIRXglcBAYhkyQ9P2CY6A8Nt9Up8+Cz9vmGo3WjBFVafBFcK8GJOMue3o5o4nJqTGeJr1n1tlE9knlcEB1cGI62trSkSpnE7sDJ33wUtB+d0IMQQXxcn291GAizF8BxvJDI+Pp4iYRo3qy2QGL/6F+fJLSDuRS+fprnv2ru7Tz0+npB5XiEwdpG5c+eOSBJ9hqOTp8kNrvnFkOnEuN1c/cshV3N9Ujo2A3J4Wh5Pv58iQcPpUrF8eddZ6tnnrtUpxc1Eu5SOzcBGbhBnTXiswMMkOD4XabQaxSmMRLvTSOnYDEie4xzR7wyS4GipGIIbSBd4FVmsBi4CeF9OB3ekdGyaJP4xhJciwTE30+7EuN0YOUQnc30S2cyDVALOmvAikYhYPHKmygKDWmXRGTidaBdXc5oU/tGE19nZmZLIpv05vK7/TmuDWXXRGTgpPikdy0c0oTXcLiySfo9CDAIAdpY2oRpl0U9OK7FCwAoQX78Dc1EpHSuOpRQKL0Uhxs45CBLjqEbxK7hYYM6Hi4edSOnYjHGbFh6bwd0UYuxyM+u1afATdreTkABLPqIJpltTZTKZwY6ODgorh0abj2giJ2Y1R3fttdfSK6+8QjfccAO5wYkTJ+jBBx/UzmZArg+gk1mzhL10jPVV7mrqpWOhjW4ea6JUrJnE+KZNm1wTHYDQ8X9aAeJDiVuzhLl0DNrSO/pplK5AD22ApVE3qNk2DW6Krpn/EyVuKLBuJlgU8tKxIm0VCY9VmaQQ0qjouk5k6brtw8pVozgFlhQtef409Xx5hVoy1nd3C3PpWKm2itoPT05OJsM4z2ukVGzZwSu09ADfiMyjqUVz6Pz585gnU5C56qqrqHuqmyL/nqNMD7uO90dodOEsS6+B0rH+ebdT2IjFYsnC+0UWD/M89kOTFDIaKRVbemDG5WppaaG+vj7q7e3VbgcNXIznz5+P8YEqp/xjPFtZetC61QtpZDNZOL8DZV3GcrncHgoZdlVVdHV1aQLEOQhAZBAb/qb29vayn7c0sJlwGF1NdjPLNFUmvDDm86zm8GoFGGDxYPkWLFjga+vX2dmp/Q1wL+0kjKVjPA6SpY+VCU+vJUtRSLC7VMygra2NFi1aRN3d3eQncLGAW3nNNdc4cuEIYelYisdCWZquWkPbXRQSGplzYHsss0B4EKAf3E+8V1i5Sm6lnYSpdKySmwmqjaAkhQQ39kkw3E9Vgy8InkBwEJ4RPHGSMM3z+PseqPR4xU+Z3c0kSR8W2zGCL3bPmxqlMHgC11iwnYpuJqh1eQuFu9kdddcFhMXDYIf76aX1g/jxHry4CLj9mXtFNTcT1BLeAIWAmzuXkhdAdBj4e/bsodOn3WkpCPB/vfTSS0U5Obfx6jN3G84Q7
Kj2s6qfPKKbYUim97f10W2zv0le8dZbb9FTTz1F+/btIycZHx+n119/nR544AE6fNi7pnIrWHT4zENA0lhtXomal7ywJNMf7F1DXnLq1Cl64YUXtMMJ6wehPfbYY5rwvOb7fXdRSKg5VatZaDc8PNzDoeUv2fL1UMD51mebLJWOrflhlpzi4Ycf1o5mgZV7/vnn6cMPPyQnGI4T/eG7UdPPh6X79MadFAJSbO1q+tM1LR5qNykkQZZX+59QxgWCZcJi1c8//5waBS4s3EqnRGcVfLbvXr+NwgDPnZP1nlO3tHxycnJlNpv9iEIA8kuwfGbyTE5avELuuOMOWrdunVZNYgaIFSva3ZjHmbV4huhCMrcDS2vN70DdsBbyEGFZsWAMkIc8nvMVgqCLmeAL3EoIDnM5L4MnpSBwFTLRJeuJDphaTDUxMZHg00EKEbB62069qe2HXmgBezgHtbb7Vjr5/f8kt7nuuuvomWeeKbN+ENqLL76oBWncZMmfLaNzf7tAW2NXWH8Jkd3Z8xd0J39OXkaMPWK1XoBSE9OrGDOZzEFOCCYohGBQoZgaid8ePfm7du1a8goj+IIIKCKhXlm4m266iZ577jntdqXPKITUDaoYmC6dYNEhyJKgENKj2GBC8OWDDz7Q3Eu3rVw1VPuMPGKr2SeaLl1gJQ+Q1G8qA4IoqohO0EjpGjGF1Zoh04oWnCWX81dr+BBgSRuWhCdWTw0gunPnztGFCxdIUAJL1g40UiUrVs9DRkdH6euvv9Y6mkF4uD015Y/diAKMZU1YFh6UHcZOZF4DcZ05cwZlfEVuJh6H+IaGhkSA3mDZ2oGG1oVwhFOsnktAZIZlq9W3ExFOCBNnwVUa0kJDwkOCUKye81y6dEmLXJqdy8HiwfKdPXtWrJ8LRCKRgUasHWh4CTRbvQ18ko2tHQCigUs5MTFBjYDfw4EeKn7rcuYn2Btp2PNreAmyvn3zyyTYCoInsHKNiq4QMy6q0BgY+2ZqMqvR1Np/doW2yN7p9gC3slLwpFmMoIwEX2wlVautgxmaEp6+p94GEhoGIoPYUHfppGWS4IutbG3G2oGmu910dXXtlkBLY8CdhFsJ99INjOCL5P4ap5mAStHrkA3A6onLaR7D/fMq+mjk/jAHlNIz82CMNxNQKcQW4cHs8pVAcnsmwGCHlVMh4GG8F3E/TdO0i2lgW2PFjo6OHeJyVgfBExWtjOF+SvClNhjbsVisqYBKIbZ2NBWXs5zC4InKA9tY2+fWfNNnpOwOItoqPL0J7pMkaBgFzX4ZzMZFwmzwJUQitc3FNLC9hzciPmFPrFcraPYLZguvwyA8PVE+QDbjSPN8JNYphOv2zBY0+4V6uT9Vdj1yEKw82EIO4Ijw9Ea4q4M830Ojn0KsFjT7hVqF1/F4nIKKPnZX89TJkTHs2HYxQZ/vbdy4UdttB0lwDErVgyfNgr8Tltwo3oZ1v//++ynA2D6vK8TRfZqCPN9Db0v0ssSGjnYUNPsFzOvwN2/fvl3bZiygbLUzdVAJ0301myHoPTmPHDlCFy9epDAwZ84cWr58OQUV5Os4J72aHMYV4WHXIf5jsP9CnARBXVKU7wSdIodxRXiA3bE45dvAx0kQ1CNFLokOuLYXL/6gbDZ7r1S2CKqBMYmx6ZbogKubYM+ePXtQKlsE1YhGoxswNslFXN99Xq8CkMWzghKwtXuyvb19N7mM68IDuvhkGZHgNY6nDarhWnClEhxw2cKnzSQI7rPVqXIwM3gqPCDiEzzAU9EBz4UHRHyCi3guOqCE8ICIT3ABJUQHlBEeyGQyGznKtJ0EwX42OLGurlE8iWpWA31bcrmcJNkF29DHklKiA0pZPIOxsbGVnNR8m6S8TGgOrVrK7eS4GZQUHpDaTqFJUuRi7aVVlHI1C9E/sNXSMlCwir605xZVRQeUFR7AB6evjZIqF8EUWHiNMeNUywa7UNbVLAURTw68bOYPtIcEoQQEUVCAr1oQpRq+ER6QeZ9QhRQpPJ+rhNKuZin4YNny3SIbYgoGumup9HyuEr6yeIWw9VtP+UqXOAmhw2+uZSm+FR6A68kf/mtBbqQklIOoJfYy8JuVK8RXrmYpRtQTVz6pdgk++I6xcBXfuZ9FB3xt8QrRAy9b+FhHQuAIgpUrJDDCM5C5X7CAlUNPFC/aMzhJ4IQHYP34C9vIV8knSPAt+k49W1RPhjdCIIVnIO6nPwmaW1mJQAvPQNxPf6ALDotVkxRwQiE8AxGgsqQovzp8gEJCqIRnIAJUhhSFTHAGoRSegQjQG3SXclcYBWcQauEZsAATPBg2SwWMs4RpDlcPEV4BBVHQVSRW0BaQh4tEIrv4GGhra1OuBYNXiPCqADeUr9DrxAo2BqxbLpfbA3cyiHm4ZhHh1UEvxEbbwbtJrGA9Unzs4mMgyDk4OxDhWQDdz1paWtaLCItIUV5sSZm7mUeE1yAQIc9bEnzcHTZ31HAj+bxbLFtjiPBsgIXXk06nE9FoNMF3V/H9lRQgWGAIiryXzWaTsVgsKXO25hHhOQCEmMlkID6kKVaxdVjplyZNehQyyTeP8u3dHR0dgyI0+xHhuYSeqsCqCbioSFf0eClIXWApWDO2ZIf5forvD4rr6A4iPI+BdRwfH4+zm9oDUUKILIB+PM4/1g79NojXeJ0RwzLpZ9xP4T4L/CiLCyLDCu5UV1dXSqyYt/w/2W0QzOzEVkIAAAAASUVORK5CYII=" + SadHeaderImage = 
"iVBORw0KGgoAAAANSUhEUgAAAN4AAADeCAYAAABSZ763AAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAABjjSURBVHgB7Z1bbBzXecc/7lLkcilblOXKVqGYK6OODSSxqKdcAFcrC2hefH0pnMKCKMB20db1BYH7UAmQBFh9SBBYbpIWUFyYgoLEaB58UV4SQNZaRmv7SbQco3ZdxENbjWxFEimLXFKUdp3vP5yhZod7nTkzc2bO9wNWe+Hyot3z3+96vtNHQqJ8+eWXI1euXCnhgtv5fL6Uy+VG8bV6vV7q6+sbwePuc3G/zY+z8A8/Z4afO4NrXPjnTDn3J/nnz6xatcrC4yQkRh8JsTA/P19iAYzxgi/x4t/MQhhzhUUJ4ArREeYbuM0PW0NDQxYJkSPCiwBYpmq1WubFXIb14oVdTkpgveIKkm++wZdKoVCYFOuoHhGeAnxC2wprRhkCQoRV5P9XpVgsVkSI4RHhBQSuIy/EB3gR3s93y2QWFf6/v9rf318ZGBiYJKFnRHg9wGIr89VWvozzpUQCsBwRTogIu0eE1wFYNr7aSSK2brBFyF7AQUnStEeE1wTEbCy4cUPdSFVU+HKYBThBwgpEeB5mZ2fHODkyzjd3piULmQIsWhLhfrGC1xDh0XLstpfEukVNhZYEWCHDMVp4cCdpKX4rkxAnFi0JcIIMxUjhOYKDhSuRkCQWGSpAo4QngtMWiwwToBHCkxguNVhkiAAzLTynBvciieDSxgRlPAuaSeGhDnf58uW9fP0UCamF37+DXNZ5PosCzJzwWHAP1Gq1F6UOlxksyqD7mRnhiVuZeSYoQ+5njjLAwsLCk+yWnCQRXZZBC9/JarWaifAh1RZPrJyxVPiyK83WL7UWb3FxcadYOWMp8+W4U5dNJamzeJKxFLwg88mWb3/adsWnSniOa3mcpPNEaMTiy7Y0uZ6pcTU9rmWJBKGRUtoSL6mweJy1fE5cS6EbcrncvsHBwf2kOVoLD/Eci+5lkgSK0BsV0jzrqa3wJJ4TQmKRxnGfljEeRjCQiE4IR4kvxzk3oOWMU+2EhyQK++kiOkEFGJt/fG5u7gHSDK1cTYiuVqtNkCAohvMFTxeLxYOkCdpYPBTFRXRCVHC54TmsMdIELSweXhB2CfaRIESMLuWGxIUnohPiRgfxJSo8EZ2QFEmLLzHhieiEpElSfIkIz9m4qk2GSTCXfD4/PjAwcJhiJnbhYSYKW7qXSRA0IQnxxSo851CQ4zKISNAJ7OXjdbktzvP9YhOe9F4KmmNRjL2dsQhPRKeGN2d/R6/NvEMnZt+ji7U5+7HRgfX2Zce67XTX6q+TEAqrUChsiWM3e1zCg+jKJAQCgnt06nmaWjzb9nkQ4J4N36OHb7ibhMBU2Opto4iJvGXMadMpkxCIA2d+SX/10e6OogN4DgSK7xECU65Wq89RxERq8aRsEA4I6NnPXqIg7Ln5IdrN1k8IRtSZzsiEh7gOM1IkgxmMn1943bZeYfjtbQck7gsI4jxev1uiSrZE4mpiZANfSdkgBM8qcBfDCtdk3DXsXCsnEuE5cV2JhEDA2nUT03UCPwM/SwhMiT23SLYSKXc1nem+L5IQmDvef1SJ8AAynR987WckBCeKeE+pxXPqddpsNkwjqqydC34WyhFCcOr1+kFnbStDtasJS1ciITDPRlAKeFbKC6Fw4jylXpwy4aF0QFKvC4Vqa+dygi2eWL3QlFVOqlYiPJhh2VsXniPnj1FUiNULTy6X26vK5VQiPIhOSgfhgFU6EaFVEqsXHpUuZ2jhIYvJnwQ7SQhFHG1eYvWUUFYxpzOU8CSLqQbEdSdisEZi9dTA5YUXwxbWQwmP3UskVEokhOLAmWD9mEEQqxceiC5sYT1wAd2xdh+TEApYOxTM40R6OJWxKWgvZ2CLJ1lMNcRp7Vx+fPY1EpQQONESSHiSUFEDrN2RC9GVEFpx9OI7kdQLDaTMWihTAIJaPEmoKCAJa3ftd0usp4hAWuhZeE4TdImEUCRl7VyOXHidZpy5LUIoyo4meiKIxRNrp4AkrZ3LT/94lAQl9KyJnoQn1k4NSVs7FyRZxOopodSr1evV4om1U4AO1g5gRKBYPWX0pI2uhSfWTg1LXSrvkS6I1VNGT1avF4sn1k4BPz9/TKtUvlg9pXRdYutKeE6tokRCKLDIj2g4A0WsnjK6rut1Jby+vj6xdgrQtXAtVk8pXWmlY6+m9GSqQ+UQI9WsyQ/bQ5FG+FoIRz6f39Lp5KGOFk96MtUQ1VgHVYjVU8fVq1fHOz2nG4sHa1ciIRQ6WzsXWL3P7vwFCeHAFOrBwcFN7U4damvxpISgBt2tnQusngzADY+zX2+83XPaCo8VKzsQFJCmzaeyUVYNrJ372329pfCcQ0fKJIQiLdbOBX8rsq9CaMrtJpK1FB6LTtkMQVNBbSyNFuSZ0y9IXU8N462+0FJ4nUyl0Jm/7eIUVx3B3/xPLD4hNC1DtabCW1xcHCNJqgQG1uIxFt1rKXbZ0GHzmBzzFZZSq06WpsLrpg4hNAfj87770W4tW8N6Bf8HlEFkJGAoys0ebFrHk9pd98C6feKcyPPazNuxzMdMgr9c/XXasW473Tm0yb4IXWMNDa18wVYID25mrVY7ScIyiHlQ4zrFn0e4bV3+3L6Nx0wdGoTWMggQRffNxVvt23jsG8610MCKMYArhIcTUTix8hwZBgQEMX1iC+ss378mLsnw9QaEd8vAevtQzNGBm6g0uH5ZpCZaS64QPF0sFg96H1shvIWFheNZrN+5LqHXauH+lHMR4mPUESXEWRq8yQRrWWGLt837QIPwpqenRwqFwjSlFNdqwUq9W/2YxTa7bMXEaqWDrFpL1tVab+9mg/BwCkoul3uZUgLE9NOzr0V+xJWgD26SByPoIc60UK/XHxweHn7Fvd/v/SIrskwp4ScsuAOfvSSWzDDcD1mI7vH199Hjf3YvpQFHW8vCa7B4nFg5yU8YI81BYTcLdTIhPP/IwvvBxkdIdzhvMskJli3u/eUCOuK7NIgOfYQiOsHlx388mor2NmjLe6besvA4+NNedOj0/4nskhZ8QHxp6K5hj7Ls3va2jJVJc2SvmNCKR1PQV+rNoeQ8D24ljZGjpYR2TDlte5pTcm8sC4/TnVq7mkdnZHOm0B7dd4NwqW7ZuNnCw05ZtnihDlOPGrRwCUI73rykz2j8ZjizWEq47Vq8EmmOuJlCJ9JQ03U9S1t4rETtM5qCkAXYsyzh2rV4ZdKcNLUHCcmQhjWSz+c349oWHgd9a0hz7izeSoLQjjSskVqtds3V1D2jCe5d800ShHbcl4I1wkauZF87rWJaZzQButJxEYRmYG3clYL1gcwmLrnh4eESpYQfbnxExgoITTk0+iSlhStXrpRy7HNqb+1csBEyDZ3oQrzgAzlNyTdbeGkrJey44W4Rn7AMLF1a9uS5cJw3kktDfOcHe7DS5FoI0YA1gA/iFFJCVrNEKQQvuIjPTBDnp1h0sHij/ciwsN
WjNIIXfjPHfZjcrEO70E1HL9G616tUG+qjj7+/jhbX5ds+f+B8jTZOzNDw/y7S2XtW0+f3Xke9sva/q7ThV5fs33n2nuto+jtDHb9n9N+n6frJBZr76gBN/d1aqhU7HgysDRDdb247kPoxgbk0FM/bgTcAb0TS2c7VLJ71v56lfLW+LKhOrP/1JVt0S7dnaejTK9Qr3t+54Vdf2Lfbsfatqi06gN994+vpmVmTFdFx3byU439SF+P5ccWXZGYLFscLFrUrqlYMf9j49dz8l9Qrec/3QHSdhNTpb9IVvLdZEB2w63hpTK40I2nxzX9lle26eYHr2QpYSFgpF7h7/u/vhvPbiw331x2rtrV6frHPfnWQdCdLogPQXHqc+y5w36CkxOeP0WBdXLfOzwi7fF6CiA6cu3u4IUaD6Db85xdNn4u/xSv2KxyDBv29cZH0exoVqc1qtiLJNwqL2L+QNx6+2LDYAe77Lc/0t4sUBIgOiRkva9+ab+pS3nis0Q2dFdElQuYsnkuSb9jp8ZEVFmj03xqn4kMAfsvzxVhwl+/c9mFa2Ngwm9jOXHp/xxq2dn4xnt+ub/tdVkUH7BiPMkpSbxxKCH4LVDh9hW790XlbCEj/r/NZns/v6b2M4OfTXY2hOgS/iX8nMqX4vTf73M+52wfsuFRHdEiWRU3f/HyAVFqKwAEmqPO9a5+1GR8QWjcZRFi7D/5FzQKDJW0V3/n5kH9npzpjEuhSHoqazFo8lzVO7WdzzBkxFKavdLGwPx1Xl1SGy3muC/cRSSARXbJkXnjAFV+cm2kR5/3+++tWxF5eIDrVWcUzf3192w4YCNPvCusATgAyRXQg866mn8em/pWOXDhGcYIs443HZqnw6VWqsyDnv9JvC2R+Y3QxFmI7uJ1Dp69SjuM9xHTT3yp21VIWNxDdoVueIJPoq1ar01kpondLEuITmmOi6EDOe0qlKRwafYL2bPgeCcmC98BE0TGWETFeM3bf/JCIL0Hw2uM9MBXsQDfO4rmI+JLBdNFBc9gWZKzwgIgvXkwXHUB4B4s3RYYj4osHEd0SrLmL/STYuAtCDr+MBkwCS9tQoqjI5/PTSK5YJNhAfI+vv48EtcDSieiuUa/Xp2DxLBKW2cPiOzrztjbHgmHD7HWTC7T6w0VadaG2vMkVnTHoikGj86WxgrZbfNDoLO5lI0iu9LP6kGEhYQm0l8HqPXP6BUoSzEZZf3R2xV4+FwjQHS+B5mh3V8Tc7YNa9WHulth5BZxcmexbXFwcq9VqJ0lYBjsa7nj/0UQml3knjwXBFeD0d4JtrFXNmTt/IWP3fXCMtyU3NzdnkdAArF4Se8GwV+8vnj0XaiCRLdzDF+n2fz7b0lrGBQ4SEdGtZNWqVVb/2rVrZ6rV6oxp/ZqdwFlrce7hw2AkjOrzg6ZqxG/T3x6y4zl3qxE21w6cq9H17y6wYOdXfB9EBxHD+nWzVSgKRgflMFE/qOHh4pYTLL7IccwJ0Up0X3DSBLsYmsVsCxtX2Rc8B4NsR96apxvYYq7yWDl38BGugwzLFdTDiZVJXNtZFVbguyQ0MHM1nvgOk7/8ooOV+wMLDptpu0mUuHEd9v812/aDn48ZLJ2G3QrRg+I5rnPOnUkSGrhYm6WoceMxL3V7A+0NgQYRQYCnd47YVtI/lh0Cv/VHF2IV36lqvOM2UkIF/7jCs0ho4MTs7yhqMIzILwSILuwGWcR0/7fnxhWjJ5aGLsUnPkuTWqhOoJSAa1t4XMcTi+chjqQK4jp/1vEPCnelw/rB9WwlvjhAWUaXRgSNsPCPLbyhoSHL5O1Bfj6JeLFAcP64DllL1XMuIb6P2PL5575AfN0cqqKCN2PwHNICspnQGm57A4E3SLA5FbHFwylBXmCVzkaUdWw1dAlzYNqd7aCKuMcq6ow3l+IVnkWCzYlL71FUoPfSX3fDQNso27xc8fndTlhd/1h31Yir2cCycVsWHpvBV0iwiXKxrPdZGXv6VwyTv9qJrxDgXL5uOVX9PQnLVNwby8JbWFiQBAtFmxBY3eTMPKT/4wJW1fr7tSvPdoiwxofXUofTenWgUCisdDXROib1vGhjEv/RXEioxL2TAN0uqPN5QbIH4ouK9yTOs+M770Q//34g4xMsUSVWlg4saYztktpBAMH7+zfd7UVRIAkWmwZtNQiPVVkhw4mq2wLNzF6SPhQSVs//+9HXGUW8d2pe4jy/thqEt7i4WCHDmVr8nKJg7X81upkqjuYKi/8sP/DnXZ421AvSOkZULBYr3vsNrzriPPZDK2QwUbWKFU5fbbiPbGbSLPV2rml4LMxewFZI6xhV/BPbV8x8qNfrr5KhRBmLeGMq3NZlPAO2Ffn/NtWY3jrGbuYKTa0Y7+fU854jA4myVQwxFRY5mNNsMFEcfxtax0ZvuJtMpL+/v+J/bIXFc3rJLDKQqHckYFHrJjqXqP82gzOb1sDAwIoyXavxYofJQKTLIjpMdTWbuZmglfAqZCCnpN4UGaZ+qLGbOdHs8abCY3ezQoa5m0gASGtTdBjaOtbUzQTtJtka5W5Kd0X0mNY61srNBO2EN0EGIW5m9Jj24cYVgoOtvtZSeMhumlRMP3FJdkpHjWGtYxV3t3kz2h6aYFIx/ZOIWsWEaxjWOtY2VGsrvMuXL0+YMotFYrzoMah1zGJrN9HuCW2Fh95NMiDJIqKLB1Nax3K5XKXTczqeCIs6RK1We5IyzCcpWQw4BOSu675Bm4c22YequOl5tGMdOX8sFYvahNYxDtH2d3pOR+GhDrGwsFBhl7NMGeWE5iPoIDicM4frll+/+SH7//HY1PNaCxDexcOUadomVVy6OpGSRddRwWlG164KWLXf3HbAvrQSnRc854Ov/YwOjT6ZyDFj3WCAq9mVVvqoS9jqHc+q1dtw6m+06qrAmXK7FZwbfuTC63TgzC+1Wuz4QMCHQ0ZBUmVTN0/s+gxmFl0mkyw6tYq5gvsfXphhRQd2cCyFRb6Hf6YuFjDjrWNde4ZdWzwwP2+n/0qUIRAXffej3ZQkENw/rL/PFltUJ6hiwR/47CU7CZM0v2XX+a4uXOeU0bW1A11bPIfMxXpJZzR3rLub3r7jIO3h5EiUxxbD4h265QnbAu5Yt52SJKNxXk/a6El4TlHQIiE0SIQgaXLolngTIToJMEN0LJj76dXigUxZvTX5eM8HdwXXbaYyKlwB4u+IO/6L+zWPgZ410VOM55KlDCfcnjvef5SiBov7BxsfofvWfJN0JM4MKKytruWOAPQU27kEsXiZquthAURpeRC3/ZAFh8Wmq+iAmwGNugbodt1kiEBaCCQ87FDP0pahhyOIdVSXBuICAnwHyZ6IShDI3maFXC430Wts5xLI1QRcWijxVWa6i1FSUNU65ha/R1Iey6guQWSweL6pm/awZgQWHqhWqwfZ8mWigRqLDOILE+OgNLD7Zn2K1apQIUC3/S0rrw2HW88Xi8WnKCChhDc9PT0yODj4MYsvvkPeIiSo+Do1MWcFvC7P/P9/0NGZt3v6vqyJjpZKatuCWjsQSnhgbm7uAfZ1X
6aM0MunuymC83P04jv0zOkXuvqAwmujc9N2QHYFje1cQgsPZLGB2hUgzkP3LjDEbfdwdhLFZ9ME5wcCxAcU9th5+y8hsntHvkX38uuUtdcICRX28nZRSJQID4kWFt7JrLicfrCo0EyNwm/aEyZRYcJrhDEovMa3hHExXZQID7DVe4r/MCMPOxHMgNf305xQOUgKUCY8kOU9e4LZoG5dKBS2kSICFdBbwaLbZcpUMsEoLKxtUohS4TlDcJ8mQcgW+1XEdV6UCg8gzYriIglCBsBaDls6aIbSGM8FhXX2h09SxnarC8Zh8Tre4j+/XAXKLR5wBuFuk3hPSCvO2t0WhehAJMIDEu8JKUd5XOclMuEBifeElLJfVb2uFZHEeH6kviekBdX1ulZEavFc5ufnHyQZkiToj/J6XStisXjA2Th7nCTTKeiJRSG3+vRCLBYP4D9Uq9UelEynoBtYk1ibcYkOxCY8sHr16knJdAq6kc/nd2FtUozEKjzgdAHE4kcLQiew42BwcPAVipnYhQcc8WX66C8hFUReNmhFbMmVZnDCZR9f7SVBiB8UyPdRQiQqPCDiExIgUdGBxIUHRHxCjCQuOqCF8ICIT4gBLUQHtBEekLktQoSEHsmnkkSymq0oFAoH6/W6FNkFZThrSSvRAa0snsvs7OwYFzUxJLdEghAcu1sq7uJ4N2gpPCC9nUJILIqx97JXtHI1vTgv2LYsHQcmxIOztWeLrqID2goP4IVz9kZJl4vQFdh4jTUT1cgGVWjravpBxpMTL3uzOiZeCIczXv1p3ZIorUiN8IDEfUILLNI4nmuG1q6mH7ywbPm2yBwXwcVxLbWO55qRKovnha3fOC11upRIMI60uZZ+Uis8ANeTX/wXZZCSWSBridkoabNyXlLlavpxs5745JNul+yD9xgbV/Gep1l0INUWz4uTeNnHl50kZI4sWDkvmRGei8R+2QJWDjNRkhjPECWZEx5wjoZ+ij8lnyQhtTgn9ezTvRgehEwKz0Xcz3SSNbeyGZkWnou4n+nAERw2q1Yo4xghPBcRoLZYtLQ7fIIMwSjhuYgAtcEiwwTnYqTwXESAyeC4lIdNFJyL0cJzYQGWeTHslQ6YaDEphuuECM+DJwu6lcQKKgF1uFwud5gvEwMDA9qNYEgKEV4L4IbyJ/ROsYLBgHWr1+uvwp3MYh0uLCK8DjiN2Bg7eD+JFeyExZfDfJnIcg1OBSK8HsD0s/7+/nERYQMWLYmtIrFb94jwAgIRctxS5sv9prmjrhvJ16+IZQuGCE8BLLyRarVazufzZb67le+PUYZggSEp8katVqsUi8WKxGzhEeFFAIS4sLAA8aFMsZWtw1hahjQ5WcgK35zi268UCoVJEZp6RHgx4ZQqsGsCLirKFSNJCtIRmAVrxpbsXb5v8f1JcR3jQYSXMLCOc3NzJXZTRyBKCJEFMIrH+cv2xbkNSm1+zoxrmZxr3LdwnwU+xeKCyLCD2xoeHrbEiiXLnwDnkx8SBNVTNQAAAABJRU5ErkJggg==" + + // FamilyPlanSub is the common subject user for all family plan email + FamilyPlainHost = "https://family.ente.io" +) + +// CreateFamily creates a family with current user as admin member +func (c *Controller) CreateFamily(ctx context.Context, adminUserID int64) error { + err := c.BillingCtrl.IsActivePayingSubscriber(adminUserID) + if err != nil { + return stacktrace.Propagate(ente.ErrNoActiveSubscription, "you must be on a paid plan") + } + adminUser, err := c.UserRepo.Get(adminUserID) + if err != nil { + return err + } + if adminUser.FamilyAdminID != nil { + if *adminUser.FamilyAdminID != adminUserID { + return stacktrace.Propagate(ente.ErrBadRequest, "Must not be a part of a different family") + } else { + logrus.Info(fmt.Sprintf("family is already created for %d", adminUserID)) + return nil + } + } + err = c.FamilyRepo.CreateFamily(ctx, adminUserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// InviteMember invites a user to join the family plan of admin User +func (c *Controller) InviteMember(ctx *gin.Context, adminUserID int64, email string) error { + err := c.BillingCtrl.IsActivePayingSubscriber(adminUserID) + if err != nil { + return stacktrace.Propagate(ente.ErrNoActiveSubscription, "you must be on a paid plan") + } + adminUser, err := c.UserRepo.Get(adminUserID) + if err != nil { + return err + } + if adminUser.FamilyAdminID == nil { + return stacktrace.Propagate(ente.ErrBadRequest, "admin needs to create a family before inviting members") + } else if *adminUser.FamilyAdminID != adminUserID { + return stacktrace.Propagate(ente.ErrBadRequest, "must be an admin to invite members") + } + + members, err := c.FamilyRepo.GetMembersWithStatus(adminUserID, repo.ActiveOrInvitedFamilyMemberStatus) + if err != nil { + return stacktrace.Propagate(err, "") + } + + if len(members) >= maxFamilyMemberLimit { + return stacktrace.Propagate(ente.ErrFamilySizeLimitReached, "family invite limit exceeded") + } + + potentialMemberID, err := 
c.UserRepo.GetUserIDWithEmail(email) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + return stacktrace.Propagate(ente.ErrNotFound, "invited member is not on ente") + } else { + return stacktrace.Propagate(err, "") + } + } + if potentialMemberID == adminUserID { + return stacktrace.Propagate(ente.ErrCanNotInviteUserAlreadyInFamily, "Can not self invite") + } + + potentialMemberUser, err := c.UserRepo.Get(potentialMemberID) + if err != nil { + return err + } + + if potentialMemberUser.FamilyAdminID != nil { + return stacktrace.Propagate(ente.ErrCanNotInviteUserAlreadyInFamily, "invited member is already a part of family") + } + potentialMemberSub, err := c.BillingCtrl.GetSubscription(ctx, potentialMemberID) + if err != nil { + return stacktrace.Propagate(err, "") + } + if billing.IsActivePaidPlan(potentialMemberSub) && !potentialMemberSub.Attributes.IsCancelled { + return stacktrace.Propagate(ente.ErrCanNotInviteUserWithPaidPlan, "") + } + + inviteToken, err := auth.GenerateURLSafeRandomString(InviteTokenLength) + if err != nil { + return stacktrace.Propagate(err, "") + } + + activeInviteToken, err := c.FamilyRepo.AddMemberInvite(ctx, adminUserID, potentialMemberUser.ID, inviteToken) + if err != nil { + return stacktrace.Propagate(err, "") + } + go func(token string) { + notificationErr := c.sendNotification(ctx, adminUserID, potentialMemberID, ente.INVITED, &token) + if notificationErr != nil { + logrus.WithError(notificationErr).Error("family-plan invite notification failed") + } + }(activeInviteToken) + return nil +} + +// RemoveMember verify admin -> memberID mapping & revokes the member's access from admin plan +func (c *Controller) RemoveMember(ctx context.Context, adminID int64, id uuid.UUID) error { + familyMember, err := c.FamilyRepo.GetMemberById(ctx, id) + if err != nil { + return stacktrace.Propagate(err, "failed to find member for given id") + } + if familyMember.AdminUserID != adminID { + return stacktrace.Propagate(ente.ErrPermissionDenied, "ops can be performed by family admin only") + } + if familyMember.Status == ente.REMOVED { + return nil + } + if familyMember.Status != ente.ACCEPTED { + return stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("can not remove member from %s state", familyMember.Status)) + } + err = c.FamilyRepo.RemoveMember(ctx, adminID, familyMember.MemberUserID, ente.REMOVED) + if err != nil { + return stacktrace.Propagate(err, "") + } + go func() { + notificationErr := c.sendNotification(ctx, adminID, familyMember.MemberUserID, ente.REMOVED, nil) + if notificationErr != nil { + logrus.WithError(notificationErr).Error("family-plan remove notification failed") + } + }() + return nil +} + +// RevokeInvite revokes a family invite which is not accepted yet +func (c *Controller) RevokeInvite(ctx context.Context, adminID int64, id uuid.UUID) error { + familyMember, err := c.FamilyRepo.GetMemberById(ctx, id) + if err != nil { + return stacktrace.Propagate(err, "failed to find member for given id") + } + if familyMember.AdminUserID != adminID { + return stacktrace.Propagate(ente.ErrPermissionDenied, "ops can be performed by family admin only") + } + if familyMember.Status == ente.REVOKED { + return nil + } + if familyMember.Status != ente.INVITED { + return stacktrace.Propagate(ente.ErrBadRequest, "can not revoke invite in current state") + } + err = c.FamilyRepo.RevokeInvite(ctx, adminID, familyMember.MemberUserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +func (c *Controller) CloseFamily(ctx context.Context, 
adminID int64) error { + familyMembers, err := c.FamilyRepo.GetMembersWithStatus(adminID, repo.ActiveFamilyMemberStatus) + if err != nil { + return stacktrace.Propagate(err, "") + } + if len(familyMembers) != 1 { + msg := fmt.Sprintf("can not close family with %d members", len(familyMembers)) + return stacktrace.Propagate(ente.NewBadRequestWithMessage(msg), "") + } + err = c.FamilyRepo.CloseFamily(ctx, adminID) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +func (c *Controller) sendNotification(ctx context.Context, adminUserID int64, memberUserID int64, newStatus ente.MemberStatus, inviteToken *string) error { + adminUser, err := c.UserRepo.Get(adminUserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + memberUser, err := c.UserRepo.Get(memberUserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + templateData := map[string]interface{}{ + "MemberEmailID": memberUser.Email, + "AdminEmailID": adminUser.Email, + } + if newStatus == ente.INVITED { + if inviteToken == nil { + return stacktrace.Propagate(fmt.Errorf("invite token can not be nil"), "") + } + templateData["FamilyInviteLink"] = fmt.Sprintf("%s?inviteToken=%s", FamilyPlainHost, *inviteToken) + } + var templateName, emailTo, title string + var inlineImages []map[string]interface{} + inlineImage := make(map[string]interface{}) + inlineImage["mime_type"] = "image/png" + inlineImage["cid"] = "header-image" + + if newStatus == ente.INVITED { + templateName = InviteTemplate + title = "You've been invited to join a family on ente!" + emailTo = memberUser.Email + inlineImage["content"] = HappyHeaderImage + } else if newStatus == ente.REMOVED { + emailTo = memberUser.Email + templateName = RemovedTemplate + title = "You have been removed from the family account on ente" + inlineImage["content"] = SadHeaderImage + } else if newStatus == ente.LEFT { + emailTo = adminUser.Email + templateName = LeftTemplate + title = fmt.Sprintf("%s has left your family on ente", memberUser.Email) + inlineImage["content"] = SadHeaderImage + } else if newStatus == ente.ACCEPTED { + emailTo = adminUser.Email + templateName = AcceptedTemplate + title = fmt.Sprintf("%s has accepted your invitation!", memberUser.Email) + inlineImage["content"] = HappyHeaderImage + } else { + return stacktrace.Propagate(fmt.Errorf("unsupported status %s", newStatus), "") + } + inlineImages = append(inlineImages, inlineImage) + err = emailUtil.SendTemplatedEmail([]string{emailTo}, "ente", "families@ente.io", + title, templateName, templateData, inlineImages) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} diff --git a/server/pkg/controller/family/family.go b/server/pkg/controller/family/family.go new file mode 100644 index 000000000..7f94b9cad --- /dev/null +++ b/server/pkg/controller/family/family.go @@ -0,0 +1,141 @@ +package family + +import ( + "context" + "errors" + "fmt" + "github.com/ente-io/museum/pkg/controller/usercache" + "github.com/ente-io/museum/pkg/utils/time" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/repo" + "github.com/sirupsen/logrus" + + "github.com/ente-io/stacktrace" +) + +const ( + // maxFamilyMemberLimit number of folks who can be part of a family + maxFamilyMemberLimit = 6 +) + +// Controller exposes functions to interact with family module +type Controller struct { + BillingCtrl *controller.BillingController + UserRepo *repo.UserRepository + FamilyRepo *repo.FamilyRepository + UserCacheCtrl 
*usercache.Controller
+}
+
+// FetchMembers returns the list of members who are part of a family plan
+func (c *Controller) FetchMembers(ctx context.Context, userID int64) (ente.FamilyMemberResponse, error) {
+    user, err := c.UserRepo.Get(userID)
+    if err != nil {
+        return ente.FamilyMemberResponse{}, stacktrace.Propagate(err, "")
+    }
+    if user.FamilyAdminID == nil {
+        return ente.FamilyMemberResponse{}, stacktrace.Propagate(ente.ErrBadRequest, "user is not part of any family plan")
+    }
+    return c.FetchMembersForAdminID(ctx, *user.FamilyAdminID)
+}
+
+func (c *Controller) FetchMembersForAdminID(ctx context.Context, familyAdminID int64) (ente.FamilyMemberResponse, error) {
+    familyMembers, err := c.FamilyRepo.GetMembersWithStatus(familyAdminID, repo.ActiveOrInvitedFamilyMemberStatus)
+    if err != nil {
+        return ente.FamilyMemberResponse{}, stacktrace.Propagate(err, "")
+    }
+    memberUserIDs := make([]int64, 0)
+    for _, familyMember := range familyMembers {
+        memberUserIDs = append(memberUserIDs, familyMember.MemberUserID)
+    }
+    if len(memberUserIDs) == 0 {
+        return ente.FamilyMemberResponse{}, stacktrace.Propagate(errors.New("member count can not be zero"), "")
+    }
+
+    usersUsageWithSubData, err := c.UserRepo.GetUserUsageWithSubData(ctx, memberUserIDs)
+    if err != nil {
+        return ente.FamilyMemberResponse{}, err
+    }
+    var adminSubStorage, adminSubExpiryTime int64
+    for i := 0; i < len(familyMembers); i++ {
+        member := &familyMembers[i]
+        for _, userUsageData := range usersUsageWithSubData {
+            if member.MemberUserID == userUsageData.UserID {
+                member.Email = *userUsageData.Email
+                // return usage only if the member is part of the family group
+                if member.Status == ente.ACCEPTED || member.Status == ente.SELF {
+                    member.Usage = userUsageData.StorageConsumed
+                }
+                if member.IsAdmin {
+                    adminSubStorage = userUsageData.Storage
+                    adminSubExpiryTime = userUsageData.ExpiryTime
+                }
+            }
+        }
+    }
+    bonus, err := c.UserCacheCtrl.GetActiveStorageBonus(ctx, familyAdminID)
+    if err != nil {
+        return ente.FamilyMemberResponse{}, err
+    }
+    adminUsableBonus := int64(0)
+    if adminSubExpiryTime < time.Microseconds() {
+        adminUsableBonus = bonus.GetUsableBonus(0)
+    } else {
+        adminUsableBonus = bonus.GetUsableBonus(adminSubExpiryTime)
+    }
+
+    return ente.FamilyMemberResponse{
+        Members:    familyMembers,
+        Storage:    adminSubStorage, // family plan storage
+        ExpiryTime: adminSubExpiryTime,
+        AdminBonus: adminUsableBonus,
+    }, nil
+}
+
+func (c *Controller) HandleAccountDeletion(ctx context.Context, userID int64, logger *logrus.Entry) error {
+    user, err := c.UserRepo.Get(userID)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    if user.FamilyAdminID == nil {
+        logger.Info("not part of any family, declining any pending invite")
+        err = c.FamilyRepo.DeclineAnyPendingInvite(ctx, userID)
+        if err != nil {
+            return stacktrace.Propagate(err, "")
+        }
+    } else if *user.FamilyAdminID != userID {
+        logger.Info("user is part of family as member/child, leaving family")
+        err = c.LeaveFamily(ctx, userID)
+        if err != nil {
+            return stacktrace.Propagate(err, "")
+        }
+    } else {
+        logger.Info("user is a family admin, revoking invites & removing members")
+        members, err := c.FetchMembersForAdminID(ctx, userID)
+        if err != nil {
+            return stacktrace.Propagate(err, "")
+        }
+
+        for _, member := range members.Members {
+            if member.IsAdmin {
+                continue
+            } else if member.Status == ente.ACCEPTED {
+                logger.Info(fmt.Sprintf("removing member_id %d", member.MemberUserID))
+                err = c.RemoveMember(ctx, userID, member.ID)
+                if err != nil {
+                    return stacktrace.Propagate(err, "")
+                }
+            } else if member.Status == ente.INVITED {
+                logger.Info(fmt.Sprintf("revoking invite member_id %d", member.MemberUserID))
+                err = c.RevokeInvite(ctx, userID, member.ID)
+                if err != nil {
+                    return stacktrace.Propagate(err, "")
+                }
+            } else {
+                logger.WithField("member", member).Error("unexpected state during account deletion")
+            }
+        }
+    }
+    return nil
+}
diff --git a/server/pkg/controller/family/member.go b/server/pkg/controller/family/member.go
new file mode 100644
index 000000000..f3df3ee8b
--- /dev/null
+++ b/server/pkg/controller/family/member.go
@@ -0,0 +1,122 @@
+package family
+
+import (
+    "context"
+    "database/sql"
+    "errors"
+    "fmt"
+
+    "github.com/ente-io/museum/ente"
+    "github.com/ente-io/stacktrace"
+    "github.com/sirupsen/logrus"
+)
+
+// LeaveFamily removes the actor user from the family to which the user belongs.
+func (c *Controller) LeaveFamily(ctx context.Context, userID int64) error {
+    user, err := c.UserRepo.Get(userID)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    if user.FamilyAdminID == nil {
+        logrus.WithField("user_id", userID).Info("not part of any family group")
+        // user has either not joined any group or already left it.
+        return nil
+    }
+    if *user.FamilyAdminID == userID {
+        return stacktrace.Propagate(ente.ErrPermissionDenied, "admin can not leave the family group")
+    }
+    err = c.FamilyRepo.RemoveMember(ctx, *user.FamilyAdminID, userID, ente.LEFT)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    go func() {
+        notificationErr := c.sendNotification(ctx, *user.FamilyAdminID, userID, ente.LEFT, nil)
+        if notificationErr != nil {
+            logrus.WithError(notificationErr).Error("family-plan: left notification failed")
+        }
+    }()
+    return nil
+}
+
+// InviteInfo returns basic information about the invite
+func (c *Controller) InviteInfo(ctx context.Context, token string) (ente.InviteInfoResponse, error) {
+    familyMember, err := c.FamilyRepo.GetInvite(token)
+    if err != nil && errors.Is(err, sql.ErrNoRows) {
+        return ente.InviteInfoResponse{}, stacktrace.Propagate(err, "invite not found")
+    } else if err != nil {
+        return ente.InviteInfoResponse{}, stacktrace.Propagate(err, "failed to fetch invite info")
+    }
+
+    if familyMember.Status != ente.INVITED {
+        return ente.InviteInfoResponse{}, stacktrace.Propagate(ente.ErrBadRequest, "invite is not valid any more: %s", familyMember.Status)
+    }
+    adminUser, err := c.UserRepo.Get(familyMember.AdminUserID)
+    if err != nil {
+        return ente.InviteInfoResponse{}, stacktrace.Propagate(err, "failed to fetch user")
+    }
+    // verify that the inviter is still an admin
+    if adminUser.FamilyAdminID == nil || *adminUser.FamilyAdminID != adminUser.ID {
+        return ente.InviteInfoResponse{}, stacktrace.Propagate(fmt.Errorf("inviter is no longer an admin of the family plan"), "")
+    }
+    return ente.InviteInfoResponse{
+        ID:         familyMember.ID,
+        AdminEmail: adminUser.Email,
+    }, nil
+}
+
+// AcceptInvite accepts a family invite as long as it's in invited state.
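+//
+// If the invite has already been accepted, the call is effectively a no-op and
+// still returns the admin's plan details. A minimal usage sketch (illustrative
+// only; the names familyCtrl, ctx, and inviteToken are assumed, and the HTTP
+// wiring lives outside this file):
+//
+//    resp, err := familyCtrl.AcceptInvite(ctx, inviteToken)
+//    if err != nil {
+//        // e.g. surface the failure as an API error to the caller
+//    }
+//    // resp.Storage is the admin's plan storage plus any usable bonus.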
+func (c *Controller) AcceptInvite(ctx context.Context, token string) (ente.AcceptInviteResponse, error) { + familyMember, err := c.FamilyRepo.GetInvite(token) + if err != nil { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(err, "invite not found") + } + adminUser, err := c.UserRepo.Get(familyMember.AdminUserID) + if err != nil { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(err, "failed to fetch user") + } + // verify that the invitor is still an admin + if adminUser.FamilyAdminID == nil || *adminUser.FamilyAdminID != adminUser.ID { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(fmt.Errorf("inviter is no longer a admin of family plam "), "") + } + + // Accept invitation and notify admin if it's not accepted already + if familyMember.Status != ente.ACCEPTED { + // if the state is invited, accept the invitation and send email notification to the admin. + if familyMember.Status == ente.INVITED { + err = c.FamilyRepo.AcceptInvite(ctx, familyMember.AdminUserID, familyMember.MemberUserID, token) + if err != nil { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(err, "") + } + go func() { + notificationErr := c.sendNotification(ctx, familyMember.AdminUserID, familyMember.MemberUserID, ente.ACCEPTED, nil) + if notificationErr != nil { + logrus.WithError(notificationErr).Error("family-plan: accepted notification failed") + } + }() + } else { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(ente.ErrInvalidPassword, "invited state is not valid any more: %s ", familyMember.Status) + } + } + + bonus, bonusErr := c.UserCacheCtrl.GetActiveStorageBonus(ctx, adminUser.ID) + if bonusErr != nil { + return ente.AcceptInviteResponse{}, bonusErr + } + adminSubscription, subErr := c.BillingCtrl.GetActiveSubscription(adminUser.ID) + if subErr != nil && !errors.Is(subErr, ente.ErrNoActiveSubscription) { + return ente.AcceptInviteResponse{}, stacktrace.Propagate(subErr, "") + } + adminUsableBonus := int64(0) + + if subErr != nil && errors.Is(subErr, ente.ErrNoActiveSubscription) { + adminUsableBonus = bonus.GetUsableBonus(0) + } else { + adminUsableBonus = bonus.GetUsableBonus(adminSubscription.Storage) + } + + return ente.AcceptInviteResponse{ + AdminEmail: adminUser.Email, + Storage: adminSubscription.Storage + adminUsableBonus, + ExpiryTime: adminSubscription.ExpiryTime, + }, nil +} diff --git a/server/pkg/controller/file.go b/server/pkg/controller/file.go new file mode 100644 index 000000000..198d850fd --- /dev/null +++ b/server/pkg/controller/file.go @@ -0,0 +1,878 @@ +package controller + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "runtime/debug" + "strconv" + "strings" + + "github.com/ente-io/museum/pkg/controller/email" + "github.com/ente-io/museum/pkg/controller/lock" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/file" + "github.com/ente-io/stacktrace" + "github.com/gin-contrib/requestid" + "github.com/gin-gonic/gin" + "github.com/google/uuid" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + enteArray "github.com/ente-io/museum/pkg/utils/array" + "github.com/ente-io/museum/pkg/utils/s3config" + "github.com/ente-io/museum/pkg/utils/time" + log "github.com/sirupsen/logrus" +) + +// FileController exposes functions to retrieve and access encrypted files +type FileController struct { + FileRepo *repo.FileRepository + ObjectRepo *repo.ObjectRepository + ObjectCleanupRepo *repo.ObjectCleanupRepository + TrashRepository 
*repo.TrashRepository + UserRepo *repo.UserRepository + UsageCtrl *UsageController + CollectionRepo *repo.CollectionRepository + TaskLockingRepo *repo.TaskLockRepository + QueueRepo *repo.QueueRepository + S3Config *s3config.S3Config + ObjectCleanupCtrl *ObjectCleanupController + LockController *lock.LockController + EmailNotificationCtrl *email.EmailNotificationController + HostName string + cleanupCronRunning bool +} + +// StorageOverflowAboveSubscriptionLimit is the amount (50 MB) by which user can go beyond their storage limit +const StorageOverflowAboveSubscriptionLimit = int64(1024 * 1024 * 50) + +// MaxFileSize is the maximum file size a user can upload +const MaxFileSize = int64(1024 * 1024 * 1024 * 5) + +// MaxUploadURLsLimit indicates the max number of upload urls which can be request in one go +const MaxUploadURLsLimit = 50 + +// Create adds an entry for a file in the respective tables +func (c *FileController) Create(ctx context.Context, userID int64, file ente.File, userAgent string, app ente.App) (ente.File, error) { + objectPathPrefix := strconv.FormatInt(userID, 10) + "/" + if !strings.HasPrefix(file.File.ObjectKey, objectPathPrefix) || !strings.HasPrefix(file.Thumbnail.ObjectKey, objectPathPrefix) { + return file, stacktrace.Propagate(ente.ErrBadRequest, "Incorrect object key reported") + } + collection, err := c.CollectionRepo.Get(file.CollectionID) + if err != nil { + return file, stacktrace.Propagate(err, "") + } + // Verify that user owns the collection. + // Warning: Do not remove this check + if collection.Owner.ID != userID || file.OwnerID != userID { + return file, stacktrace.Propagate(ente.ErrPermissionDenied, "") + } + if collection.IsDeleted { + return file, stacktrace.Propagate(ente.ErrNotFound, "collection has been deleted") + } + + hotDC := c.S3Config.GetHotDataCenter() + // sizeOf will do also HEAD check to ensure that the object exists in the + // current hot DC + fileSize, err := c.sizeOf(file.File.ObjectKey) + if err != nil { + log.Error("Could not find size of file: " + file.File.ObjectKey) + return file, stacktrace.Propagate(err, "") + } + if fileSize > MaxFileSize { + return file, stacktrace.Propagate(ente.ErrFileTooLarge, "") + } + if file.File.Size != 0 && file.File.Size != fileSize { + return file, stacktrace.Propagate(ente.ErrBadRequest, "mismatch in file size") + } + file.File.Size = fileSize + thumbnailSize, err := c.sizeOf(file.Thumbnail.ObjectKey) + if err != nil { + log.Error("Could not find size of thumbnail: " + file.Thumbnail.ObjectKey) + return file, stacktrace.Propagate(err, "") + } + if file.Thumbnail.Size != 0 && file.Thumbnail.Size != thumbnailSize { + return file, stacktrace.Propagate(ente.ErrBadRequest, "mismatch in thumbnail size") + } + file.Thumbnail.Size = thumbnailSize + var totalUploadSize = fileSize + thumbnailSize + err = c.UsageCtrl.CanUploadFile(ctx, userID, &totalUploadSize, app) + if err != nil { + return file, stacktrace.Propagate(err, "") + } + + file.Info = &ente.FileInfo{ + FileSize: fileSize, + ThumbnailSize: thumbnailSize, + } + + // all iz well + var usage int64 + file, usage, err = c.FileRepo.Create(file, fileSize, thumbnailSize, fileSize+thumbnailSize, collection.Owner.ID, app) + if err != nil { + if err == ente.ErrDuplicateFileObjectFound || err == ente.ErrDuplicateThumbnailObjectFound { + var existing ente.File + if err == ente.ErrDuplicateFileObjectFound { + existing, err = c.FileRepo.GetFileAttributesFromObjectKey(file.File.ObjectKey) + } else { + existing, err = 
c.FileRepo.GetFileAttributesFromObjectKey(file.Thumbnail.ObjectKey) + } + if err != nil { + return file, stacktrace.Propagate(err, "") + } + file, err = c.onDuplicateObjectDetected(file, existing, hotDC) + if err != nil { + return file, stacktrace.Propagate(err, "") + } + return file, nil + } + return file, stacktrace.Propagate(err, "") + } + if usage == fileSize+thumbnailSize { + go c.EmailNotificationCtrl.OnFirstFileUpload(file.OwnerID, userAgent) + } + return file, nil +} + +// Update verifies permissions and updates the specified file +func (c *FileController) Update(ctx context.Context, userID int64, file ente.File, app ente.App) (ente.UpdateFileResponse, error) { + var response ente.UpdateFileResponse + objectPathPrefix := strconv.FormatInt(userID, 10) + "/" + if !strings.HasPrefix(file.File.ObjectKey, objectPathPrefix) || !strings.HasPrefix(file.Thumbnail.ObjectKey, objectPathPrefix) { + return response, stacktrace.Propagate(ente.ErrBadRequest, "Incorrect object key reported") + } + ownerID, err := c.FileRepo.GetOwnerID(file.ID) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + // verify that user owns the file + if ownerID != userID { + return response, stacktrace.Propagate(ente.ErrPermissionDenied, "") + } + file.OwnerID = ownerID + existingFileObject, err := c.ObjectRepo.GetObject(file.ID, ente.FILE) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + existingFileObjectKey := existingFileObject.ObjectKey + oldFileSize := existingFileObject.FileSize + existingThumbnailObject, err := c.ObjectRepo.GetObject(file.ID, ente.THUMBNAIL) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + existingThumbnailObjectKey := existingThumbnailObject.ObjectKey + oldThumbnailSize := existingThumbnailObject.FileSize + fileSize, err := c.sizeOf(file.File.ObjectKey) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + if fileSize > MaxFileSize { + return response, stacktrace.Propagate(ente.ErrFileTooLarge, "") + } + if file.File.Size != 0 && file.File.Size != fileSize { + return response, stacktrace.Propagate(ente.ErrBadRequest, "mismatch in file size") + } + thumbnailSize, err := c.sizeOf(file.Thumbnail.ObjectKey) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + if file.Thumbnail.Size != 0 && file.Thumbnail.Size != thumbnailSize { + return response, stacktrace.Propagate(ente.ErrBadRequest, "mismatch in thumbnail size") + } + diff := (fileSize + thumbnailSize) - (oldFileSize + oldThumbnailSize) + err = c.UsageCtrl.CanUploadFile(ctx, userID, &diff, app) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + // The client might retry updating the same file accidentally. + // + // This usually happens on iOS, where the first request to update a file + // might succeed, but the client might go into the background before it gets + // to know of it, and then retries again. + // + // As a safety check, also compare the file sizes. 
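+	// Such a retry reports the same object keys and a zero size delta; the
+	// flag computed below is passed along to FileRepo.Update so the retry is
+	// not mistaken for a fresh update.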
+ isDuplicateRequest := false + if existingThumbnailObjectKey == file.Thumbnail.ObjectKey && + existingFileObjectKey == file.File.ObjectKey && + diff == 0 { + isDuplicateRequest = true + } + oldObjects := make([]string, 0) + if existingThumbnailObjectKey != file.Thumbnail.ObjectKey { + // Ignore accidental retrials + oldObjects = append(oldObjects, existingThumbnailObjectKey) + } + if existingFileObjectKey != file.File.ObjectKey { + // Ignore accidental retrials + oldObjects = append(oldObjects, existingFileObjectKey) + } + if file.Info != nil { + file.Info.FileSize = fileSize + file.Info.ThumbnailSize = thumbnailSize + } else { + file.Info = &ente.FileInfo{ + FileSize: fileSize, + ThumbnailSize: thumbnailSize, + } + } + err = c.FileRepo.Update(file, fileSize, thumbnailSize, diff, oldObjects, isDuplicateRequest) + if err != nil { + return response, stacktrace.Propagate(err, "") + } + response.ID = file.ID + response.UpdationTime = file.UpdationTime + return response, nil +} + +// GetUploadURLs returns a bunch of presigned URLs for uploading files +func (c *FileController) GetUploadURLs(ctx context.Context, userID int64, count int, app ente.App) ([]ente.UploadURL, error) { + err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app) + if err != nil { + return []ente.UploadURL{}, stacktrace.Propagate(err, "") + } + s3Client := c.S3Config.GetHotS3Client() + dc := c.S3Config.GetHotDataCenter() + bucket := c.S3Config.GetHotBucket() + urls := make([]ente.UploadURL, 0) + objectKeys := make([]string, 0) + if count > MaxUploadURLsLimit { + count = MaxUploadURLsLimit + } + for i := 0; i < count; i++ { + objectKey := strconv.FormatInt(userID, 10) + "/" + uuid.NewString() + objectKeys = append(objectKeys, objectKey) + url, err := c.getObjectURL(s3Client, dc, bucket, objectKey) + if err != nil { + return urls, stacktrace.Propagate(err, "") + } + urls = append(urls, url) + } + log.Print("Returning objectKeys: " + strings.Join(objectKeys, ", ")) + return urls, nil +} + +// GetFileURL verifies permissions and returns a presigned url to the requested file +func (c *FileController) GetFileURL(userID int64, fileID int64) (string, error) { + err := c.verifyFileAccess(userID, fileID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + url, err := c.getSignedURLForType(fileID, ente.FILE) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + go c.CleanUpStaleCollectionFiles(userID, fileID) + } + return "", stacktrace.Propagate(err, "") + } + return url, nil +} + +// GetThumbnailURL verifies permissions and returns a presigned url to the requested thumbnail +func (c *FileController) GetThumbnailURL(userID int64, fileID int64) (string, error) { + err := c.verifyFileAccess(userID, fileID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + url, err := c.getSignedURLForType(fileID, ente.THUMBNAIL) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + go c.CleanUpStaleCollectionFiles(userID, fileID) + } + return "", stacktrace.Propagate(err, "") + } + return url, nil +} + +func (c *FileController) CleanUpStaleCollectionFiles(userID int64, fileID int64) { + logger := log.WithFields(log.Fields{ + "userID": userID, + "fileID": fileID, + "action": "CleanUpStaleCollectionFiles", + }) + // catch panic + defer func() { + if r := recover(); r != nil { + logger.Error("Recovered from panic", r) + } + }() + fileIDs := make([]int64, 0) + fileIDs = append(fileIDs, fileID) + + // verify file ownership + err := c.FileRepo.VerifyFileOwner(context.Background(), fileIDs, userID, logger) + + if err != 
nil {
+		logger.Warning("Failed to verify file ownership", err)
+		return
+	}
+	err = c.TrashRepository.CleanUpDeletedFilesFromCollection(context.Background(), fileIDs, userID)
+	if err != nil {
+		logger.WithError(err).Error("Failed to clean up stale files from collection")
+	}
+}
+
+// GetPublicFileURL verifies permissions and returns a presigned url to the requested file
+func (c *FileController) GetPublicFileURL(ctx *gin.Context, fileID int64, objType ente.ObjectType) (string, error) {
+	accessContext := auth.MustGetPublicAccessContext(ctx)
+	accessible, err := c.CollectionRepo.DoesFileExistInCollections(fileID, []int64{accessContext.CollectionID})
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	if !accessible {
+		return "", stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	return c.getSignedURLForType(fileID, objType)
+}
+
+// GetCastFileUrl verifies permissions and returns a presigned url to the requested file
+func (c *FileController) GetCastFileUrl(ctx *gin.Context, fileID int64, objType ente.ObjectType) (string, error) {
+	castCtx := auth.GetCastCtx(ctx)
+	accessible, err := c.CollectionRepo.DoesFileExistInCollections(fileID, []int64{castCtx.CollectionID})
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	if !accessible {
+		return "", stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	return c.getSignedURLForType(fileID, objType)
+}
+
+func (c *FileController) getSignedURLForType(fileID int64, objType ente.ObjectType) (string, error) {
+	s3Object, err := c.ObjectRepo.GetObject(fileID, objType)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return c.getPreSignedURL(s3Object.ObjectKey)
+}
+
+// Trash deletes the given files and moves them to trash
+func (c *FileController) Trash(ctx *gin.Context, userID int64, request ente.TrashRequest) error {
+	fileIDs := make([]int64, 0)
+	collectionIDs := make([]int64, 0)
+	for _, trashItem := range request.TrashItems {
+		fileIDs = append(fileIDs, trashItem.FileID)
+		collectionIDs = append(collectionIDs, trashItem.CollectionID)
+	}
+	if enteArray.ContainsDuplicateInInt64Array(fileIDs) {
+		return stacktrace.Propagate(ente.ErrBadRequest, "duplicate fileIDs")
+	}
+	if err := c.VerifyFileOwnership(ctx, userID, fileIDs); err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	uniqueCollectionIDs := enteArray.UniqueInt64(collectionIDs)
+	for _, collectionID := range uniqueCollectionIDs {
+		ownerID, err := c.CollectionRepo.GetOwnerID(collectionID)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		if ownerID != userID {
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "user doesn't own collection")
+		}
+	}
+	return c.TrashRepository.TrashFiles(fileIDs, userID, request)
+}
+
+// GetSize returns the size of files indicated by fileIDs that are owned by userID
+func (c *FileController) GetSize(userID int64, fileIDs []int64) (int64, error) {
+	size, err := c.FileRepo.GetSize(userID, fileIDs)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return size, nil
+}
+
+// GetFileInfo returns the file info for the given list of files
+func (c *FileController) GetFileInfo(ctx *gin.Context, userID int64, fileIDs []int64) (*ente.FilesInfoResponse, error) {
+	logger := log.WithFields(log.Fields{
+		"req_id": requestid.Get(ctx),
+	})
+	err := c.FileRepo.VerifyFileOwner(ctx, fileIDs, userID, logger)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	// Use GetFilesInfo to get the fileInfo for the given list. Then, for
+	// fileIDs that are not present in the response of GetFilesInfo, use
+	// GetFileInfoFromObjectKeys to get the file info, and merge the two
+	// responses. For fileIDs that are not present in the response of
+	// GetFileInfoFromObjectKeys either, add a new FileInfo entry with size = -1.
+	fileInfoResponse, err := c.FileRepo.GetFilesInfo(ctx, fileIDs, userID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	fileIDsNotPresentInFilesDB := make([]int64, 0)
+	for _, fileID := range fileIDs {
+		if val, ok := fileInfoResponse[fileID]; !ok || val == nil {
+			fileIDsNotPresentInFilesDB = append(fileIDsNotPresentInFilesDB, fileID)
+		}
+	}
+	if len(fileIDsNotPresentInFilesDB) > 0 {
+		logger.WithField("count", len(fileIDsNotPresentInFilesDB)).Info("fileInfos are not present in files table, fetching from object keys")
+		fileInfoResponseFromObjectKeys, err := c.FileRepo.GetFileInfoFromObjectKeys(ctx, fileIDsNotPresentInFilesDB)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+		err = c.FileRepo.UpdateSizeInfo(ctx, fileInfoResponseFromObjectKeys)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "Failed to update the size info in files")
+		}
+		for id, fileInfo := range fileInfoResponseFromObjectKeys {
+			fileInfoResponse[id] = fileInfo
+		}
+	}
+	missedFileIDs := make([]int64, 0)
+	for _, fileID := range fileIDs {
+		if _, ok := fileInfoResponse[fileID]; !ok {
+			missedFileIDs = append(missedFileIDs, fileID)
+		}
+	}
+	if len(missedFileIDs) > 0 {
+		return nil, stacktrace.Propagate(ente.NewInternalError("failed to get fileInfo"), "fileIDs not found: %v", missedFileIDs)
+	}
+
+	// prepare a list of FileInfoResponse
+	fileInfoList := make([]*ente.FileInfoResponse, 0)
+	for _, fileID := range fileIDs {
+		fileInfoList = append(fileInfoList, &ente.FileInfoResponse{
+			ID:       fileID,
+			FileInfo: *fileInfoResponse[fileID],
+		})
+	}
+	return &ente.FilesInfoResponse{
+		FilesInfo: fileInfoList,
+	}, nil
+}
+
+// GetDuplicates returns the list of files of the same size
+func (c *FileController) GetDuplicates(userID int64) ([]ente.DuplicateFiles, error) {
+	dupes, err := c.FileRepo.GetDuplicateFiles(userID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return dupes, nil
+}
+
+// GetLargeThumbnailFiles returns the list of files whose thumbnail size is larger than threshold size
+func (c *FileController) GetLargeThumbnailFiles(userID int64, threshold int64) ([]int64, error) {
+	largeThumbnailFiles, err := c.FileRepo.GetLargeThumbnailFiles(userID, threshold)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return largeThumbnailFiles, nil
+}
+
+// UpdateMagicMetadata updates the magic metadata for a list of files
+func (c *FileController) UpdateMagicMetadata(ctx *gin.Context, req ente.UpdateMultipleMagicMetadataRequest, isPublicMetadata bool) error {
+	err := c.validateUpdateMetadataRequest(ctx, req, isPublicMetadata)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	err = c.FileRepo.UpdateMagicAttributes(ctx, req.MetadataList, isPublicMetadata)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to update magic attributes")
+	}
+	return nil
+}
+
+// UpdateThumbnail updates the thumbnail of a file
+func (c *FileController) UpdateThumbnail(ctx *gin.Context, fileID int64, newThumbnail ente.FileAttributes, app ente.App) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	objectPathPrefix := strconv.FormatInt(userID, 10) + "/"
+	if !strings.HasPrefix(newThumbnail.ObjectKey, objectPathPrefix) {
+		return stacktrace.Propagate(ente.ErrBadRequest, "Incorrect object key reported")
+	}
+	ownerID, err := c.FileRepo.GetOwnerID(fileID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	// verify that the user owns the file
+	if ownerID != userID {
+		return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	existingThumbnailObject, err := c.ObjectRepo.GetObject(fileID, ente.THUMBNAIL)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	existingThumbnailObjectKey := existingThumbnailObject.ObjectKey
+	oldThumbnailSize := existingThumbnailObject.FileSize
+	newThumbnailSize, err := c.sizeOf(newThumbnail.ObjectKey)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	diff := newThumbnailSize - oldThumbnailSize
+	if diff > 0 {
+		return stacktrace.Propagate(errors.New("new thumbnail larger than existing thumbnail"), "")
+	}
+	err = c.UsageCtrl.CanUploadFile(ctx, userID, &diff, app)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	var oldObject *string
+	if existingThumbnailObjectKey != newThumbnail.ObjectKey {
+		// delete the old object only if the newThumbnail object key is different
+		oldObject = &existingThumbnailObjectKey
+	}
+	err = c.FileRepo.UpdateThumbnail(ctx, fileID, userID, newThumbnail, newThumbnailSize, diff, oldObject)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+// VerifyFileOwnership returns an error if the given fileIDs are not valid or don't belong to the ownerID
+func (c *FileController) VerifyFileOwnership(ctx *gin.Context, ownerID int64, fileIDs []int64) error {
+	countMap, err := c.FileRepo.GetOwnerToFileCountMap(ctx, fileIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to get owners info")
+	}
+	logger := log.WithFields(log.Fields{
+		"req_id":     requestid.Get(ctx),
+		"owner_id":   ownerID,
+		"file_ids":   fileIDs,
+		"owners_map": countMap,
+	})
+	if len(countMap) == 0 {
+		logger.Error("all fileIDs are invalid")
+		return stacktrace.Propagate(ente.ErrBadRequest, "")
+	}
+	if len(countMap) > 1 {
+		logger.Error("files are owned by multiple users")
+		return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+	if filesOwned, ok := countMap[ownerID]; ok {
+		if filesOwned != int64(len(fileIDs)) {
+			logger.WithField("file_owned", filesOwned).Error("failed to find all fileIDs")
+			return stacktrace.Propagate(ente.ErrBadRequest, "")
+		}
+		return nil
+	} else {
+		logger.Error("user is not an owner of any file")
+		return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+	}
+}
+
+func (c *FileController) validateUpdateMetadataRequest(ctx *gin.Context, req ente.UpdateMultipleMagicMetadataRequest, isPublicMetadata bool) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	for _, updateMMdRequest := range req.MetadataList {
+		ownerID, existingMetadata, err := c.FileRepo.GetOwnerAndMagicMetadata(updateMMdRequest.ID, isPublicMetadata)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		if ownerID != userID {
+			log.WithFields(log.Fields{
+				"file_id":   updateMMdRequest.ID,
+				"owner_id":  ownerID,
+				"user_id":   userID,
+				"public_md": isPublicMetadata,
+			}).Error("can't update magic metadata for a file which isn't owned by the user")
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+		}
+		if existingMetadata != nil && (existingMetadata.Version != updateMMdRequest.MagicMetadata.Version || existingMetadata.Count > updateMMdRequest.MagicMetadata.Count) {
+			log.WithFields(log.Fields{
+				"existing_count":   existingMetadata.Count,
+				"existing_version": existingMetadata.Version,
+				"file_id":          updateMMdRequest.ID,
+ "received_count": updateMMdRequest.MagicMetadata.Count, + "received_version": updateMMdRequest.MagicMetadata.Version, + "public_md": isPublicMetadata, + }).Error("invalid ops: mismatch in metadata version or count") + return stacktrace.Propagate(ente.ErrVersionMismatch, "mismatch in metadata version or count") + } + } + return nil +} + +// CleanupDeletedFiles deletes the files from object store. It will delete from both hot storage and +// cold storage (if replicated) +func (c *FileController) CleanupDeletedFiles() { + log.Info("Cleaning up deleted files") + // If cleanup is already running, avoiding concurrent runs to avoid concurrent issues + if c.cleanupCronRunning { + log.Info("Skipping CleanupDeletedFiles cron run as another instance is still running") + return + } + c.cleanupCronRunning = true + defer func() { + c.cleanupCronRunning = false + }() + items, err := c.QueueRepo.GetItemsReadyForDeletion(repo.DeleteObjectQueue, 200) + if err != nil { + log.WithError(err).Error("Failed to fetch items from queue") + return + } + for _, i := range items { + c.cleanupDeletedFile(i) + } +} + +func (c *FileController) GetTotalFileCount() (int64, error) { + count, err := c.FileRepo.GetTotalFileCount() + if err != nil { + return -1, stacktrace.Propagate(err, "") + } + return count, nil +} + +func (c *FileController) cleanupDeletedFile(qItem repo.QueueItem) { + lockName := file.GetLockNameForObject(qItem.Item) + lockStatus, err := c.TaskLockingRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), c.HostName) + ctxLogger := log.WithField("item", qItem.Item).WithField("queue_id", qItem.Id) + if err != nil || !lockStatus { + ctxLogger.Warn("unable to acquire lock") + return + } + defer func() { + err = c.TaskLockingRepo.ReleaseLock(lockName) + if err != nil { + ctxLogger.Errorf("Error while releasing lock %s", err) + } + }() + ctxLogger.Info("Deleting item") + dcs, err := c.ObjectRepo.GetDataCentersForObject(qItem.Item) + if err != nil { + ctxLogger.Errorf("Could not fetch datacenters %s", err) + return + } + for _, dc := range dcs { + if c.S3Config.ShouldDeleteFromDataCenter(dc) { + err = c.ObjectCleanupCtrl.DeleteObjectFromDataCenter(qItem.Item, dc) + } + if err != nil { + ctxLogger.WithError(err).Error("Failed to delete " + qItem.Item + " from " + dc) + return + } + err = c.ObjectRepo.RemoveDataCenterFromObject(qItem.Item, dc) + if err != nil { + ctxLogger.WithError(err).Error("Could not remove from table: " + qItem.Item + ", dc: " + dc) + return + } + } + err = c.QueueRepo.DeleteItem(repo.DeleteObjectQueue, qItem.Item) + if err != nil { + ctxLogger.WithError(err).Error("Failed to remove item from the queue") + return + } + err = c.ObjectRepo.RemoveObjectsForKey(qItem.Item) + if err != nil { + ctxLogger.WithError(err).Error("Failed to remove item from object_keys") + return + } + ctxLogger.Info("Successfully deleted item") +} + +func (c *FileController) getPreSignedURL(objectKey string) (string, error) { + s3Client := c.S3Config.GetHotS3Client() + r, _ := s3Client.GetObjectRequest(&s3.GetObjectInput{ + Bucket: c.S3Config.GetHotBucket(), + Key: &objectKey, + }) + return r.Presign(PreSignedRequestValidityDuration) +} + +func (c *FileController) sizeOf(objectKey string) (int64, error) { + s3Client := c.S3Config.GetHotS3Client() + head, err := s3Client.HeadObject(&s3.HeadObjectInput{ + Key: &objectKey, + Bucket: c.S3Config.GetHotBucket(), + }) + if err != nil { + return -1, stacktrace.Propagate(err, "") + } + return *head.ContentLength, nil +} + +func (c *FileController) 
onDuplicateObjectDetected(file ente.File, existing ente.File, hotDC string) (ente.File, error) { + newJSON, _ := json.Marshal(file) + existingJSON, _ := json.Marshal(existing) + log.Info("Comparing " + string(newJSON) + " against " + string(existingJSON)) + if file.Thumbnail.ObjectKey == existing.Thumbnail.ObjectKey && + file.Thumbnail.Size == existing.Thumbnail.Size && + file.Thumbnail.DecryptionHeader == existing.Thumbnail.DecryptionHeader && + file.File.ObjectKey == existing.File.ObjectKey && + file.File.Size == existing.File.Size && + file.File.DecryptionHeader == existing.File.DecryptionHeader && + file.Metadata.EncryptedData == existing.Metadata.EncryptedData && + file.Metadata.DecryptionHeader == existing.Metadata.DecryptionHeader && + file.OwnerID == existing.OwnerID { + // Already uploaded file + file.ID = existing.ID + return file, nil + } else { + // Overwrote an existing file or thumbnail + go c.onExistingObjectsReplaced(file, hotDC) + return ente.File{}, ente.ErrBadRequest + } +} + +func (c *FileController) onExistingObjectsReplaced(file ente.File, hotDC string) { + defer func() { + if r := recover(); r != nil { + log.Errorf("Panic caught: %s, stack: %s", r, string(debug.Stack())) + } + }() + log.Error("Replaced existing object, reverting", file) + err := c.rollbackObject(file.File.ObjectKey) + if err != nil { + log.Error("Error rolling back latest file from hot storage", err) + } + err = c.rollbackObject(file.Thumbnail.ObjectKey) + if err != nil { + log.Error("Error rolling back latest thumbnail from hot storage", err) + } + c.FileRepo.ResetNeedsReplication(file, hotDC) +} + +func (c *FileController) rollbackObject(objectKey string) error { + versions, err := c.getVersions(objectKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + if len(versions) > 1 { + err = c.deleteObjectVersionFromHotStorage(objectKey, + *versions[0].VersionId) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + return nil +} + +func (c *FileController) getVersions(objectKey string) ([]*s3.ObjectVersion, error) { + s3Client := c.S3Config.GetHotS3Client() + response, err := s3Client.ListObjectVersions(&s3.ListObjectVersionsInput{ + Prefix: &objectKey, + Bucket: c.S3Config.GetHotBucket(), + }) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return response.Versions, nil +} + +func (c *FileController) deleteObjectVersionFromHotStorage(objectKey string, versionID string) error { + var s3Client = c.S3Config.GetHotS3Client() + _, err := s3Client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: c.S3Config.GetHotBucket(), + Key: &objectKey, + VersionId: &versionID, + }) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = s3Client.WaitUntilObjectNotExists(&s3.HeadObjectInput{ + Bucket: c.S3Config.GetHotBucket(), + Key: &objectKey, + }) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +func (c *FileController) verifyFileAccess(actorUserID int64, fileID int64) error { + fileOwnerID, err := c.FileRepo.GetOwnerID(fileID) + if err != nil { + return stacktrace.Propagate(err, "") + } + + if fileOwnerID != actorUserID { + cIDs, err := c.CollectionRepo.GetCollectionIDsSharedWithUser(actorUserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + cwIDS, err := c.CollectionRepo.GetCollectionIDsSharedWithUser(fileOwnerID) + if err != nil { + return stacktrace.Propagate(err, "") + } + cIDs = append(cIDs, cwIDS...) 
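+
+		// At this point cIDs holds the collections shared with the actor plus
+		// the collections the file owner participates in; the file is
+		// accessible if it is present in any of them.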
+		accessible, err := c.CollectionRepo.DoesFileExistInCollections(fileID, cIDs)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		if !accessible {
+			return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+		}
+	}
+	return nil
+}
+
+func (c *FileController) getObjectURL(s3Client *s3.S3, dc string, bucket *string, objectKey string) (ente.UploadURL, error) {
+	r, _ := s3Client.PutObjectRequest(&s3.PutObjectInput{
+		Bucket: bucket,
+		Key:    &objectKey,
+	})
+	url, err := r.Presign(PreSignedRequestValidityDuration)
+	if err != nil {
+		return ente.UploadURL{}, stacktrace.Propagate(err, "")
+	}
+	err = c.ObjectCleanupCtrl.AddTempObjectKey(objectKey, dc)
+	if err != nil {
+		return ente.UploadURL{}, stacktrace.Propagate(err, "")
+	}
+	return ente.UploadURL{ObjectKey: objectKey, URL: url}, nil
+}
+
+// GetMultipartUploadURLs returns a collection of URLs for uploading the parts of a file
+func (c *FileController) GetMultipartUploadURLs(ctx context.Context, userID int64, count int, app ente.App) (ente.MultipartUploadURLs, error) {
+	err := c.UsageCtrl.CanUploadFile(ctx, userID, nil, app)
+	if err != nil {
+		return ente.MultipartUploadURLs{}, stacktrace.Propagate(err, "")
+	}
+	s3Client := c.S3Config.GetHotS3Client()
+	dc := c.S3Config.GetHotDataCenter()
+	bucket := c.S3Config.GetHotBucket()
+	objectKey := strconv.FormatInt(userID, 10) + "/" + uuid.NewString()
+	r, err := s3Client.CreateMultipartUpload(&s3.CreateMultipartUploadInput{
+		Bucket: bucket,
+		Key:    &objectKey,
+	})
+	if err != nil {
+		return ente.MultipartUploadURLs{}, stacktrace.Propagate(err, "")
+	}
+	err = c.ObjectCleanupCtrl.AddMultipartTempObjectKey(objectKey, *r.UploadId, dc)
+	if err != nil {
+		return ente.MultipartUploadURLs{}, stacktrace.Propagate(err, "")
+	}
+	multipartUploadURLs := ente.MultipartUploadURLs{ObjectKey: objectKey}
+	urls := make([]string, 0)
+	for i := 0; i < count; i++ {
+		url, err := c.getPartURL(*s3Client, objectKey, int64(i+1), r.UploadId)
+		if err != nil {
+			return multipartUploadURLs, stacktrace.Propagate(err, "")
+		}
+		urls = append(urls, url)
+	}
+	multipartUploadURLs.PartURLs = urls
+	r2, _ := s3Client.CompleteMultipartUploadRequest(&s3.CompleteMultipartUploadInput{
+		Bucket:   c.S3Config.GetHotBucket(),
+		Key:      &objectKey,
+		UploadId: r.UploadId,
+	})
+	url, err := r2.Presign(PreSignedRequestValidityDuration)
+	if err != nil {
+		return multipartUploadURLs, stacktrace.Propagate(err, "")
+	}
+	multipartUploadURLs.CompleteURL = url
+
+	return multipartUploadURLs, nil
+}
+
+func (c *FileController) getPartURL(s3Client s3.S3, objectKey string, partNumber int64, uploadID *string) (string, error) {
+	r, _ := s3Client.UploadPartRequest(&s3.UploadPartInput{
+		Bucket:     c.S3Config.GetHotBucket(),
+		Key:        &objectKey,
+		UploadId:   uploadID,
+		PartNumber: &partNumber,
+	})
+	url, err := r.Presign(PreSignedPartUploadRequestDuration)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return url, nil
+}
diff --git a/server/pkg/controller/kex/kex.go b/server/pkg/controller/kex/kex.go
new file mode 100644
index 000000000..9a5cdc3bb
--- /dev/null
+++ b/server/pkg/controller/kex/kex.go
@@ -0,0 +1,21 @@
+package kex
+
+import (
+	"github.com/ente-io/museum/pkg/repo/kex"
+)
+
+type Controller struct {
+	Repo *kex.Repository
+}
+
+func (c *Controller) AddKey(wrappedKey string, customIdentifier string) (identifier string, err error) {
+	return c.Repo.AddKey(wrappedKey, customIdentifier)
+}
+
+func (c *Controller) GetKey(identifier string) (wrappedKey string, err error) {
+	return c.Repo.GetKey(identifier)
+}
+
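+// DeleteOldKeys delegates to the repository to prune old key exchange entries.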
+func (c *Controller) DeleteOldKeys() { + c.Repo.DeleteOldKeys() +} diff --git a/server/pkg/controller/locationtag/controller.go b/server/pkg/controller/locationtag/controller.go new file mode 100644 index 000000000..9b9618117 --- /dev/null +++ b/server/pkg/controller/locationtag/controller.go @@ -0,0 +1,31 @@ +package locationtag + +import ( + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo/locationtag" + "github.com/gin-gonic/gin" +) + +// Controller is interface for exposing business logic related to location tags +type Controller struct { + Repo *locationtag.Repository +} + +// Create a new location tag in the system +func (c *Controller) Create(ctx *gin.Context, req ente.LocationTag) (ente.LocationTag, error) { + return c.Repo.Create(ctx, req) +} +func (c *Controller) Update(ctx *gin.Context, req ente.LocationTag) (ente.LocationTag, error) { + // todo: verify ownership before updating + panic("implement me") +} + +// Delete the location tag for the given id and ownerId +func (c *Controller) Delete(ctx *gin.Context, req ente.DeleteLocationTagRequest) (bool, error) { + return c.Repo.Delete(ctx, req.ID.String(), req.OwnerID) +} + +// GetDiff fetches the locationTags which have changed after the specified time +func (c *Controller) GetDiff(ctx *gin.Context, req ente.GetLocationTagDiffRequest) ([]ente.LocationTag, error) { + return c.Repo.GetDiff(ctx, req.OwnerID, *req.SinceTime, req.Limit) +} diff --git a/server/pkg/controller/lock/lock.go b/server/pkg/controller/lock/lock.go new file mode 100644 index 000000000..ea7d8bf73 --- /dev/null +++ b/server/pkg/controller/lock/lock.go @@ -0,0 +1,58 @@ +package lock + +import ( + "fmt" + + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +// LockController exposes functions to obtain locks before entering critical sections +type LockController struct { + TaskLockingRepo *repo.TaskLockRepository + HostName string +} + +// Try to obtain a lock with the given lockID. +// +// Return false if the lock is already taken. +// +// A call to this function should be matched by a call to ReleaseLock. A common +// pattern is to put the ReleaseLock into a defer statement immediately +// following the lock acquisition. +// +// However, it is also fine to omit the release. Such would be useful for cases +// where we want to ensure the same job cannot run again until the expiry time +// is past. +func (c *LockController) TryLock(lockID string, lockUntil int64) bool { + lockStatus, err := c.TaskLockingRepo.AcquireLock(lockID, lockUntil, c.HostName) + if err != nil || !lockStatus { + return false + } + return true +} + +// ExtendLock refreshes an existing lock by updating its locked_at to now and +// extending its lockUntil. +// +// It is only valid to call this method when holding an existing lock previously +// obtained using TryLock. +func (c *LockController) ExtendLock(lockID string, lockUntil int64) error { + foundLock, err := c.TaskLockingRepo.ExtendLock(lockID, lockUntil, c.HostName) + if err != nil { + return stacktrace.Propagate(err, "Unable to extend lock %v", lockID) + } + if !foundLock { + return fmt.Errorf("no existing lock for %v", lockID) + } + return nil +} + +// Release a lock that was obtained earlier using TryLock. 
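+//
+// An illustrative pairing with TryLock (the lock ID and duration here are
+// hypothetical):
+//
+//	if !c.TryLock("example-job", time.MicrosecondsAfterHours(1)) {
+//		return // some other instance holds the lock
+//	}
+//	defer c.ReleaseLock("example-job")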
+func (c *LockController) ReleaseLock(lockID string) {
+	err := c.TaskLockingRepo.ReleaseLock(lockID)
+	if err != nil {
+		log.Errorf("Error while releasing lock %v: %s", lockID, err)
+	}
+}
diff --git a/server/pkg/controller/mailing_lists.go b/server/pkg/controller/mailing_lists.go
new file mode 100644
index 000000000..0cd51e54f
--- /dev/null
+++ b/server/pkg/controller/mailing_lists.go
@@ -0,0 +1,160 @@
+package controller
+
+import (
+	"fmt"
+	"net/url"
+	"strings"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/external/zoho"
+	"github.com/ente-io/stacktrace"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+)
+
+// MailingListsController is used to keep the external mailing lists in sync
+// with customer email changes.
+//
+// MailingListsController contains methods for keeping external mailing lists in
+// sync when new users sign up, or update their email, or delete their account.
+// Currently, these mailing lists are hosted on Zoho Campaigns.
+//
+// See also: Syncing emails with Zoho Campaigns
+type MailingListsController struct {
+	zohoAccessToken string
+	zohoListKey     string
+	zohoTopicIds    string
+	zohoCredentials zoho.Credentials
+}
+
+// Return a new instance of MailingListsController
+func NewMailingListsController() *MailingListsController {
+	zohoCredentials := zoho.Credentials{
+		ClientID:     viper.GetString("zoho.client-id"),
+		ClientSecret: viper.GetString("zoho.client-secret"),
+		RefreshToken: viper.GetString("zoho.refresh-token"),
+	}
+
+	// The Zoho "List Key" identifies a particular list of email IDs that are
+	// stored in Zoho. All the actions that we perform (adding, removing and
+	// updating emails) are done on this list.
+	//
+	// https://www.zoho.com/campaigns/help/developers/list-management.html
+	zohoListKey := viper.GetString("zoho.list-key")
+
+	// List of topics to which emails are sent.
+	//
+	// Ostensibly, we can get them from their API
+	// https://www.zoho.com/campaigns/oldhelp/api/get-topics.html
+	//
+	// But that doesn't currently work, luckily we can get these IDs by looking
+	// at the HTML source of the topic update dashboard page.
+	zohoTopicIds := viper.GetString("zoho.topic-ids")
+
+	// Zoho has a rate limit on the number of access tokens that can be created
+	// within a given time period. So as an aid in debugging, allow the access
+	// token to be passed in. This will not be present in production - there
+	// we'll use the refresh token to create an access token on demand.
+	zohoAccessToken := viper.GetString("zoho.access_token")
+
+	return &MailingListsController{
+		zohoCredentials: zohoCredentials,
+		zohoListKey:     zohoListKey,
+		zohoTopicIds:    zohoTopicIds,
+		zohoAccessToken: zohoAccessToken,
+	}
+}
+
+// Add the given email address to our default Zoho Campaigns list.
+//
+// It is valid to resubscribe an email that has previously been unsubscribed.
+//
+// # Syncing emails with Zoho Campaigns
+//
+// Zoho Campaigns does not support maintaining a list of raw email addresses
+// that can be later updated or deleted via their API. So instead, we maintain
+// the email addresses of our customers in a Zoho Campaign "list", and subscribe
+// or unsubscribe them to this list.
+func (c *MailingListsController) Subscribe(email string) error {
+	if c.shouldSkip() {
+		return stacktrace.Propagate(ente.ErrNotImplemented, "")
+	}
+
+	// Need to set "Signup Form Disabled" in the list settings since we use this
+	// list to keep track of emails that have already been verified.
+	//
+	// > You can use this API to add contacts to your mailing lists. For signup
+	// form enabled mailing lists, the contacts will receive a confirmation
+	// email. For signup form disabled lists, contacts will be added without
+	// any confirmations.
+	//
+	// https://www.zoho.com/campaigns/help/developers/contact-subscribe.html
+	return c.doListAction("listsubscribe", email)
+}
+
+// Unsubscribe the given email address from our default Zoho Campaigns list.
+//
+// See: [Note: Syncing emails with Zoho Campaigns]
+func (c *MailingListsController) Unsubscribe(email string) error {
+	if c.shouldSkip() {
+		return stacktrace.Propagate(ente.ErrNotImplemented, "")
+	}
+
+	// https://www.zoho.com/campaigns/help/developers/contact-unsubscribe.html
+	return c.doListAction("listunsubscribe", email)
+}
+
+func (c *MailingListsController) shouldSkip() bool {
+	if c.zohoCredentials.RefreshToken == "" {
+		log.Info("Skipping mailing list update because credentials are not configured")
+		return true
+	}
+	return false
+}
+
+// Both the listsubscribe and listunsubscribe Zoho Campaigns API endpoints work
+// similarly, so use this function to share the common code.
+func (c *MailingListsController) doListAction(action string, email string) error {
+	// Query escape the email so that any pluses get converted to %2B.
+	escapedEmail := url.QueryEscape(email)
+	contactInfo := fmt.Sprintf("{Contact+Email: \"%s\"}", escapedEmail)
+	// Instead of using QueryEscape, use PathEscape. QueryEscape escapes the "+"
+	// character, which causes Zoho API to not recognize the parameter.
+	escapedContactInfo := url.PathEscape(contactInfo)
+
+	url := fmt.Sprintf(
+		"https://campaigns.zoho.com/api/v1.1/json/%s?resfmt=JSON&listkey=%s&contactinfo=%s&topic_id=%s",
+		action, c.zohoListKey, escapedContactInfo, c.zohoTopicIds)
+
+	zohoAccessToken, err := zoho.DoRequest("POST", url, c.zohoAccessToken, c.zohoCredentials)
+	c.zohoAccessToken = zohoAccessToken
+
+	if err != nil {
+		// This is not necessarily an error, and can happen when the customer
+		// had earlier unsubscribed from our organization emails in Zoho,
+		// selecting the "Erase my data" option. This causes Zoho to remove the
+		// customer's entire record from their database.
+		//
+		// Then later, say if the customer deletes their account from ente, we
+		// would try to unsubscribe their email but it wouldn't be present in
+		// Zoho, and this API call would've failed.
+		//
+		// In such a case, Zoho will return the following response:
+		//
+		// { code":"2103",
+		//   "message":"Contact does not exist.",
+		//   "version":"1.1",
+		//   "uri":"/api/v1.1/json/listunsubscribe",
+		//   "status":"error"}
+		//
+		// Special case these to reduce the severity level so as to not cause
+		// error log spam.
+ if strings.Contains(err.Error(), "Contact does not exist") { + log.Warnf("Zoho - Could not %s '%s': %s", action, email, err) + } else { + log.Errorf("Zoho - Could not %s '%s': %s", action, email, err) + } + } + + return stacktrace.Propagate(err, "") +} diff --git a/server/pkg/controller/object.go b/server/pkg/controller/object.go new file mode 100644 index 000000000..8f197fe46 --- /dev/null +++ b/server/pkg/controller/object.go @@ -0,0 +1,126 @@ +package controller + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/ente-io/museum/pkg/controller/lock" + "github.com/ente-io/museum/pkg/external/wasabi" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/file" + "github.com/ente-io/museum/pkg/utils/s3config" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +// ObjectController manages various operations specific to object storage, +// including dealing with the special cases for individual replicas. +// +// The user's encrypted data is replicated to three places - 2 hot storage data +// centers, and 1 cold storage. All three of them provide S3 compatible APIs +// that we use to add and remove objects. However, there are still some specific +// (and intentional) differences in the way the three replicas work. e.g. +// objects stored in Wasabi are also placed under a special compliance mode, +// which is a Wasabi specific feature. +type ObjectController struct { + S3Config *s3config.S3Config + ObjectRepo *repo.ObjectRepository + QueueRepo *repo.QueueRepository + LockController *lock.LockController + complianceCronRunning bool +} + +// RemoveComplianceHolds removes the Wasabi compliance hold from objects in +// Wasabi for files which have been deleted. +// +// Removing the compliance hold will allow these files to then be deleted when +// we subsequently attempt to delete the objects from Wasabi after +// DeleteObjectQueue delay (x days currently). +func (c *ObjectController) RemoveComplianceHolds() { + if c.S3Config.WasabiComplianceDC() == "" { + // Wasabi compliance is currently disabled in config, nothing to do. 
return
+	}
+
+	if c.complianceCronRunning {
+		log.Info("Skipping RemoveComplianceHolds cron run as another instance is still running")
+		return
+	}
+	c.complianceCronRunning = true
+	defer func() {
+		c.complianceCronRunning = false
+	}()
+
+	items, err := c.QueueRepo.GetItemsReadyForDeletion(repo.RemoveComplianceHoldQueue, 200)
+	if err != nil {
+		log.WithError(err).Error("Failed to fetch items from queue")
+		return
+	}
+
+	log.Infof("Removing compliance holds on %d deleted files", len(items))
+	for _, i := range items {
+		c.removeComplianceHold(i)
+	}
+
+	log.Infof("Removed compliance holds on %d deleted files", len(items))
+}
+
+func (c *ObjectController) removeComplianceHold(qItem repo.QueueItem) {
+	logger := log.WithFields(log.Fields{
+		"item":     qItem.Item,
+		"queue_id": qItem.Id,
+	})
+
+	objectKey := qItem.Item
+
+	lockName := file.GetLockNameForObject(objectKey)
+	if !c.LockController.TryLock(lockName, time.MicrosecondsAfterHours(1)) {
+		logger.Info("Unable to acquire lock")
+		return
+	}
+	defer c.LockController.ReleaseLock(lockName)
+
+	dcs, err := c.ObjectRepo.GetDataCentersForObject(objectKey)
+	if err != nil {
+		logger.Error("Could not fetch datacenters", err)
+		return
+	}
+
+	config := c.S3Config
+	complianceDC := config.WasabiComplianceDC()
+	s3Client := config.GetS3Client(complianceDC)
+	bucket := *config.GetBucket(complianceDC)
+
+	for _, dc := range dcs {
+		if dc == complianceDC {
+			logger.Info("Removing compliance hold")
+			err = c.DisableObjectConditionalHold(&s3Client, bucket, objectKey)
+			if err != nil {
+				logger.WithError(err).Errorf("Failed to remove compliance hold (dc: %s, bucket: %s)", dc, bucket)
+				return
+			}
+			logger.Infof("Removed compliance hold for %s/%s", bucket, objectKey)
+			break
+		}
+	}
+
+	err = c.QueueRepo.DeleteItem(repo.RemoveComplianceHoldQueue, qItem.Item)
+	if err != nil {
+		logger.WithError(err).Error("Failed to remove item from the queue")
+		return
+	}
+}
+
+// DisableObjectConditionalHold disables the Wasabi compliance conditional hold
+// that has been placed on the object. This way, we can enable these objects to be
+// cleaned up when the user permanently deletes them.
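+//
+// (The conditional hold is a Wasabi-specific compliance feature; the
+// pkg/external/wasabi package wraps the corresponding S3-compatible API call.)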
+func (c *ObjectController) DisableObjectConditionalHold(s3Client *s3.S3, bucket string, objectKey string) error { + _, err := wasabi.PutObjectCompliance(s3Client, &wasabi.PutObjectComplianceInput{ + Bucket: aws.String(bucket), + Key: aws.String(objectKey), + ObjectComplianceConfiguration: &wasabi.ObjectComplianceConfiguration{ + ConditionalHold: aws.Bool(false), + }, + }) + return stacktrace.Propagate(err, "Failed to update ObjectCompliance for %s/%s", bucket, objectKey) +} diff --git a/server/pkg/controller/object_cleanup.go b/server/pkg/controller/object_cleanup.go new file mode 100644 index 000000000..a1ba2dba5 --- /dev/null +++ b/server/pkg/controller/object_cleanup.go @@ -0,0 +1,684 @@ +package controller + +import ( + "database/sql" + "errors" + "fmt" + "strings" + "sync" + stime "time" + + "github.com/ente-io/museum/pkg/controller/lock" + "github.com/ente-io/stacktrace" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/spf13/viper" + + "github.com/aws/aws-sdk-go/service/s3" + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/s3config" + enteString "github.com/ente-io/museum/pkg/utils/string" + "github.com/ente-io/museum/pkg/utils/time" + log "github.com/sirupsen/logrus" +) + +// ObjectCleanupController exposes functions to remove orphan and stale entries +// from object storage. +// +// There are 3 main types of orphans that can end up in our object storage: +// +// 1. We create presigned URLs for clients to upload their objects to. It might +// happen that the client is able to successfully upload to these URLs, but +// not tell museum about the successful upload. +// +// 2. During replication, we might have half-done multipart uploads. +// +// 3. When an existing object is updated (e.g. the user edits the file on iOS), +// then the file entry in our DB is updated to point to the new object, and +// the old object is now meant to be discarded. +// +// ObjectCleanupController is meant to manage all these scenarios over time. +type ObjectCleanupController struct { + Repo *repo.ObjectCleanupRepository + ObjectRepo *repo.ObjectRepository + LockController *lock.LockController + ObjectController *ObjectController + S3Config *s3config.S3Config + // Prometheus Metrics + mOrphanObjectsDeleted *prometheus.CounterVec +} + +// PreSignedRequestValidityDuration is the lifetime of a pre-signed URL +const PreSignedRequestValidityDuration = 7 * 24 * stime.Hour + +// PreSignedPartUploadRequestDuration is the lifetime of a pre-signed multipart URL +const PreSignedPartUploadRequestDuration = 7 * 24 * stime.Hour + +// clearOrphanObjectsCheckInterval is the interval after which we check if the +// ClearOrphanObjects job needs to be re-run. +// +// See also, clearOrphanObjectsMinimumJobInterval. +const clearOrphanObjectsCheckInterval = 1 * 24 * stime.Hour + +// ClearOrphanObjectsMinimumJobInterval is the minimum interval that must pass +// before we run another instance of the ClearOrphanObjects job. +// +// This interval is enforced across museum instances. 
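+//
+// For example, with a check interval of 1 day and a minimum job interval of
+// 2 days, each museum instance considers the job daily, but the job itself
+// runs at most once every two days across all instances.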
+const clearOrphanObjectsMinimumJobInterval = 2 * 24 * stime.Hour
+
+// Return a new instance of ObjectCleanupController
+func NewObjectCleanupController(
+	objectCleanupRepo *repo.ObjectCleanupRepository,
+	objectRepo *repo.ObjectRepository,
+	lockController *lock.LockController,
+	objectController *ObjectController,
+	s3Config *s3config.S3Config,
+) *ObjectCleanupController {
+	mOrphanObjectsDeleted := promauto.NewCounterVec(prometheus.CounterOpts{
+		Name: "museum_orphan_objects_deleted_total",
+		Help: "Number of objects successfully deleted when clearing orphan objects",
+	}, []string{"dc"})
+
+	return &ObjectCleanupController{
+		Repo:                  objectCleanupRepo,
+		ObjectRepo:            objectRepo,
+		LockController:        lockController,
+		ObjectController:      objectController,
+		S3Config:              s3Config,
+		mOrphanObjectsDeleted: mOrphanObjectsDeleted,
+	}
+}
+
+// StartRemovingUnreportedObjects starts goroutines that delete those objects
+// that were possibly uploaded but not reported to the database
+func (c *ObjectCleanupController) StartRemovingUnreportedObjects() {
+	// TODO: object_cleanup: This code is only currently tested for B2
+	if c.S3Config.GetHotDataCenter() != c.S3Config.GetHotBackblazeDC() {
+		log.Info("Skipping RemovingUnreportedObjects since the Hot DC is not B2")
+		return
+	}
+
+	workerCount := viper.GetInt("jobs.remove-unreported-objects.worker-count")
+	if workerCount == 0 {
+		workerCount = 1
+	}
+
+	log.Infof("Starting %d workers to remove-unreported-objects", workerCount)
+
+	for i := 0; i < workerCount; i++ {
+		go c.removeUnreportedObjectsWorker(i)
+	}
+}
+
+// Entry point for the worker goroutine to clean up unreported objects.
+//
+// i is an arbitrary index for the current goroutine.
+func (c *ObjectCleanupController) removeUnreportedObjectsWorker(i int) {
+	for {
+		count := c.removeUnreportedObjects()
+		if count == 0 {
+			stime.Sleep(stime.Duration(5+i) * stime.Minute)
+		} else {
+			stime.Sleep(stime.Second)
+		}
+	}
+}
+
+func (c *ObjectCleanupController) removeUnreportedObjects() int {
+	logger := log.WithFields(log.Fields{
+		"task": "remove-unreported-objects",
+	})
+	logger.Info("Removing unreported objects")
+
+	count := 0
+
+	tx, tempObjects, err := c.Repo.GetAndLockExpiredObjects()
+	if err != nil {
+		if !errors.Is(err, sql.ErrNoRows) {
+			logger.Error(err)
+		}
+		return count
+	}
+
+	for _, tempObject := range tempObjects {
+		err = c.removeUnreportedObject(tx, tempObject)
+		if err != nil {
+			continue
+		}
+		count += 1
+	}
+
+	logger.Infof("Removed %d objects", count)
+
+	// We always commit the transaction, even on errors for individual rows. To
+	// avoid objects getting stuck in a loop, we increase their expiry times.
+
+	cerr := tx.Commit()
+	if cerr != nil {
+		cerr = stacktrace.Propagate(cerr, "Failed to commit transaction")
+		logger.Error(cerr)
+	}
+
+	return count
+}
+
+func (c *ObjectCleanupController) removeUnreportedObject(tx *sql.Tx, t ente.TempObject) error {
+	// TODO: object_cleanup
+	// This should use the DC from TempObject (once we start persisting it)
+	// dc := t.DataCenter
+	dc := c.S3Config.GetHotDataCenter()
+
+	logger := log.WithFields(log.Fields{
+		"task":        "remove-unreported-objects",
+		"object_key":  t.ObjectKey,
+		"data_center": dc,
+		"upload_id":   t.UploadID,
+	})
+
+	skip := func(err error) error {
+		logger.Errorf("Clearing tempObject failed: %v", err)
+		newExpiry := time.MicrosecondsAfterDays(1)
+		serr := c.Repo.SetExpiryForTempObject(tx, t, newExpiry)
+		if serr != nil {
+			logger.Errorf("Updating expiry for failed temp object failed: %v", serr)
+		}
+		return err
+	}
+
+	logger.Info("Clearing tempObject")
+
+	exists, err := c.ObjectRepo.DoesObjectExist(tx, t.ObjectKey)
+	if err != nil {
+		return skip(stacktrace.Propagate(err, ""))
+	}
+
+	if exists {
+		err := errors.New("aborting attempt to delete an object which has a DB entry")
+		return skip(stacktrace.Propagate(err, ""))
+	}
+
+	if t.IsMultipart {
+		err = c.abortMultipartUpload(t.ObjectKey, t.UploadID, dc)
+	} else {
+		err = c.DeleteObjectFromDataCenter(t.ObjectKey, dc)
+	}
+	if err != nil {
+		return skip(err)
+	}
+
+	err = c.Repo.RemoveTempObject(tx, t)
+	if err != nil {
+		return skip(err)
+	}
+
+	return nil
+}
+
+// AddTempObjectKey creates a new temporary object entry.
+//
+// It persists a given object key as having been provided to a client for
+// uploading. If a client does not successfully mark this object's upload as
+// having completed within PreSignedRequestValidityDuration, this temp object
+// will be cleaned up.
+func (c *ObjectCleanupController) AddTempObjectKey(objectKey string, dc string) error {
+	expiry := time.Microseconds() + (2 * PreSignedRequestValidityDuration.Microseconds())
+	return c.addCleanupEntryForObjectKey(objectKey, dc, expiry)
+}
+
+// Add the object to a queue of "temporary" objects that are deleted (if they
+// exist) if this entry is not removed from the queue by expirationTime.
+func (c *ObjectCleanupController) addCleanupEntryForObjectKey(objectKey string, dc string, expirationTime int64) error {
+	err := c.Repo.AddTempObject(ente.TempObject{
+		ObjectKey:   objectKey,
+		IsMultipart: false,
+		DataCenter:  dc,
+	}, expirationTime)
+	return stacktrace.Propagate(err, "")
+}
+
+// AddMultipartTempObjectKey creates a new temporary object entry for a
+// multipart upload.
+//
+// See AddTempObjectKey for more details.
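+//
+// The expiry is set to now plus twice PreSignedPartUploadRequestDuration,
+// leaving the client a buffer beyond the URL's validity before cleanup kicks in.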
+func (c *ObjectCleanupController) AddMultipartTempObjectKey(objectKey string, uploadID string, dc string) error {
+	expiry := time.Microseconds() + (2 * PreSignedPartUploadRequestDuration.Microseconds())
+	err := c.Repo.AddTempObject(ente.TempObject{
+		ObjectKey:   objectKey,
+		IsMultipart: true,
+		UploadID:    uploadID,
+		DataCenter:  dc,
+	}, expiry)
+	return stacktrace.Propagate(err, "")
+}
+
+func (c *ObjectCleanupController) DeleteAllObjectsWithPrefix(prefix string, dc string) error {
+	s3Client := c.S3Config.GetS3Client(dc)
+	bucket := c.S3Config.GetBucket(dc)
+	output, err := s3Client.ListObjectsV2(&s3.ListObjectsV2Input{
+		Bucket: bucket,
+		Prefix: &prefix,
+	})
+	if err != nil {
+		log.Error(err)
+		return stacktrace.Propagate(err, "")
+	}
+	var keys []string
+	for _, obj := range output.Contents {
+		keys = append(keys, *obj.Key)
+	}
+	for _, key := range keys {
+		err = c.DeleteObjectFromDataCenter(key, dc)
+		if err != nil {
+			log.Error(err)
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	return nil
+}
+
+func (c *ObjectCleanupController) DeleteObjectFromDataCenter(objectKey string, dc string) error {
+	log.Info("Deleting " + objectKey + " from " + dc)
+	var s3Client = c.S3Config.GetS3Client(dc)
+	bucket := c.S3Config.GetBucket(dc)
+	_, err := s3Client.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: bucket,
+		Key:    &objectKey,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	err = s3Client.WaitUntilObjectNotExists(&s3.HeadObjectInput{
+		Bucket: bucket,
+		Key:    &objectKey,
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return nil
+}
+
+func (c *ObjectCleanupController) abortMultipartUpload(objectKey string, uploadID string, dc string) error {
+	s3Client := c.S3Config.GetS3Client(dc)
+	bucket := c.S3Config.GetBucket(dc)
+	_, err := s3Client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+		Bucket:   bucket,
+		Key:      &objectKey,
+		UploadId: &uploadID,
+	})
+	if err != nil {
+		if isUnknownUploadError(err) {
+			log.Info("Could not find upload for " + objectKey)
+			return nil
+		}
+		return stacktrace.Propagate(err, "")
+	}
+	r, err := s3Client.ListParts(&s3.ListPartsInput{
+		Bucket:   bucket,
+		Key:      &objectKey,
+		UploadId: &uploadID,
+	})
+	if err != nil {
+		if isUnknownUploadError(err) {
+			// This is expected now, since we just aborted the upload
+			return nil
+		}
+		return stacktrace.Propagate(err, "")
+	}
+	if len(r.Parts) > 0 {
+		return stacktrace.NewError("abort Failed")
+	}
+	return nil
+}
+
+// The original code here checked for NoSuchUpload, presumably because that is
+// the error that B2 returns.
+//
+// Wasabi returns something similar:
+//
+//	<Error>
+//	    <Code>NoSuchUpload</Code>
+//	    <Message>The specified upload does not exist. The upload ID may be invalid,
+//	    or the upload may have been aborted or completed.</Message>
+//	    ...
+//	</Error>
+//
+// However, Scaleway returns a different error, NoSuchKey
+//
+//	<Error>
+//	    <Code>NoSuchKey</Code>
+//	    <Message>The specified key does not exist.</Message>
+//	    ...
+//	</Error>
+//
+// This method returns true if either of these occur.
+func isUnknownUploadError(err error) bool {
+	// B2, Wasabi
+	if strings.Contains(err.Error(), "NoSuchUpload") {
+		return true
+	}
+	// Scaleway
+	if strings.Contains(err.Error(), "NoSuchKey") {
+		return true
+	}
+	return false
+}
+
+// StartClearingOrphanObjects is the entry point for the job that goes through
+// all the objects in the given datacenter, and deletes orphan objects for which
+// we do not have DB entries.
+//
+// Such orphan objects are expected to have been created because the code for
+// updating the DB entries when a file gets updated did not clean up the
+// corresponding objects from object storage. Once we start keeping track of
+// such objects in a separate queue, this cron job won't be needed.
+func (c *ObjectCleanupController) StartClearingOrphanObjects() {
+	// TODO: object_cleanup: This code is only currently tested for B2
+	if c.S3Config.GetHotDataCenter() != c.S3Config.GetHotBackblazeDC() {
+		log.Info("Skipping ClearingOrphanObjects since the Hot DC is not B2")
+		return
+	}
+
+	isJobEnabled := viper.GetBool("jobs.clear-orphan-objects.enabled")
+	if !isJobEnabled {
+		return
+	}
+
+	prefix := viper.GetString("jobs.clear-orphan-objects.prefix")
+
+	log.Infof("Starting workers to clear-orphan-objects (prefix %s)", prefix)
+
+	// TODO: object_cleanup: start workers for other DCs once the temp_objects
+	// table supports specifying a DC
+	go c.clearOrphanObjectsWorker(c.S3Config.GetHotBackblazeDC(), prefix)
+}
+
+// clearOrphanObjectsWorker is the entry point for the worker goroutine to
+// clean up objects in a particular DC.
+func (c *ObjectCleanupController) clearOrphanObjectsWorker(dc string, prefix string) {
+	for {
+		c.ClearOrphanObjects(dc, prefix, false)
+		stime.Sleep(clearOrphanObjectsCheckInterval)
+	}
+}
+
+// IsValidClearOrphanObjectsDC verifies that the given DC is valid for use as
+// the target of an orphan object cleanup.
+func (c *ObjectCleanupController) IsValidClearOrphanObjectsDC(dc string) bool {
+	if dc != c.S3Config.GetHotBackblazeDC() {
+		return false
+	}
+
+	// TODO: object_cleanup: This code is only currently tested for B2
+	if c.S3Config.GetHotDataCenter() != c.S3Config.GetHotBackblazeDC() {
+		return false
+	}
+
+	return true
+}
+
+func (c *ObjectCleanupController) ClearOrphanObjects(dc string, prefix string, forceTaskLock bool) {
+	logger := log.WithFields(log.Fields{
+		"task":        "clear-orphan-objects",
+		"data_center": dc,
+	})
+
+	if !c.IsValidClearOrphanObjectsDC(dc) {
+		logger.Errorf("Unsupported DC %s", dc)
+		return
+	}
+
+	lockName := clearOrphanObjectsLockName(dc)
+
+	if forceTaskLock {
+		logger.Infof("Forcefully removing task lock %s", lockName)
+		err := c.LockController.TaskLockingRepo.ReleaseLock(lockName)
+		if err != nil {
+			logger.Error(stacktrace.Propagate(err, ""))
+			return
+		}
+	}
+
+	if !c.LockController.TryLock(lockName, clearOrphanObjectsNextLockUntil()) {
+		logger.Infof("Skipping since a lock could not be obtained")
+		return
+	}
+	// The lock is not released intentionally
+	//
+	// By keeping the stale entry for the unheld lock in the DB, we will be able
+	// to retain the timestamp when this job last ran. This is a kludgy way to
+	// guarantee that clearOrphanObjectsMinimumJobInterval is enforced across
+	// all museum instances (without introducing a new DB table).
+	//
+	// defer c.LockController.ReleaseLock(lockName)
+
+	s3Config := c.S3Config
+	dest := &CleanupOrphanObjectsDestination{
+		DC:                dc,
+		Client:            s3Config.GetS3Client(dc),
+		Bucket:            s3Config.GetBucket(dc),
+		HasComplianceHold: s3Config.WasabiComplianceDC() == dc,
+	}
+
+	logger.Infof("Clearing orphan objects from bucket %s (hasComplianceHold %v)",
+		*dest.Bucket, dest.HasComplianceHold)
+
+	// Each directory listing of an S3 bucket returns a maximum of 1000 objects,
+	// and an optional continuation token. As long as there are more objects
+	// (indicated by the presence of the continuation token), keep fetching
+	// directory listings.
+	//
+	// For each directory listing, go through its entries in chunks, spawning a
+	// goroutine per chunk to clear the orphan objects.
+	//
+	// Refresh the lock's acquisition time during each iteration since this job
+	// can span hours, and we don't want a different instance to start another
+	// run just because it was only considering the start time of the job.
+
+	err := dest.Client.ListObjectVersionsPages(&s3.ListObjectVersionsInput{
+		Bucket: dest.Bucket,
+		Prefix: &prefix,
+	},
+		func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
+			c.clearOrphanObjectsPage(page, dest, logger)
+
+			lerr := c.LockController.ExtendLock(lockName, clearOrphanObjectsNextLockUntil())
+			if lerr != nil {
+				logger.Error(lerr)
+				return false
+			}
+
+			return true
+		})
+	if err != nil {
+		logger.Error(stacktrace.Propagate(err, ""))
+		return
+	}
+
+	logger.Info("Cleared orphan objects")
+}
+
+func clearOrphanObjectsLockName(dc string) string {
+	return fmt.Sprintf("clear-orphan-objects:%s", dc)
+}
+
+func clearOrphanObjectsNextLockUntil() int64 {
+	return time.Microseconds() + clearOrphanObjectsMinimumJobInterval.Microseconds()
+}
+
+type CleanupOrphanObjectsDestination struct {
+	DC     string
+	Client s3.S3
+	Bucket *string
+	// If true, this bucket has a compliance hold on objects that needs to be
+	// removed first before they can be deleted.
+	HasComplianceHold bool
+}
+
+// ObjectVersionOrDeleteMarker is an abstraction to allow us to reuse the same
+// code to delete both object versions and delete markers
+type ObjectVersionOrDeleteMarker struct {
+	ObjectVersion *s3.ObjectVersion
+	DeleteMarker  *s3.DeleteMarkerEntry
+}
+
+func (od ObjectVersionOrDeleteMarker) GetKey() *string {
+	if od.ObjectVersion != nil {
+		return od.ObjectVersion.Key
+	}
+	return od.DeleteMarker.Key
+}
+
+func (od ObjectVersionOrDeleteMarker) GetLastModified() *stime.Time {
+	if od.ObjectVersion != nil {
+		return od.ObjectVersion.LastModified
+	}
+	return od.DeleteMarker.LastModified
+}
+
+func (od ObjectVersionOrDeleteMarker) GetVersionId() *string {
+	if od.ObjectVersion != nil {
+		return od.ObjectVersion.VersionId
+	}
+	return od.DeleteMarker.VersionId
+}
+
+func (c *ObjectCleanupController) clearOrphanObjectsPage(page *s3.ListObjectVersionsOutput, dest *CleanupOrphanObjectsDestination, logger *log.Entry) error {
+	// MaxKeys is 1000. Break the page into batches, and create a separate
+	// goroutine to process each batch.
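+	//
+	// As an illustration: a full page of 1000 object versions and 1000 delete
+	// markers would yield 200 batches of 10, each cleared by its own goroutine.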
batchSize := 10
+	versions := page.Versions
+	nv := len(versions)
+	deleteMarkers := page.DeleteMarkers
+	nd := len(deleteMarkers)
+	n := nv + nd
+
+	logger.Infof("Processing page containing %d values (%d object versions, %d delete markers)", n, nv, nd)
+
+	ods := make([]ObjectVersionOrDeleteMarker, n)
+	for i := 0; i < nv; i++ {
+		ods[i] = ObjectVersionOrDeleteMarker{ObjectVersion: versions[i]}
+	}
+	for i := 0; i < nd; i++ {
+		ods[nv+i] = ObjectVersionOrDeleteMarker{DeleteMarker: deleteMarkers[i]}
+	}
+
+	var wg sync.WaitGroup
+
+	// Step by batchSize so that every entry lands in exactly one batch.
+	for i := 0; i < n; i += batchSize {
+		end := i + batchSize
+		if end > n {
+			end = n
+		}
+
+		wg.Add(1)
+		go func(i int, end int) {
+			defer wg.Done()
+			batch := ods[i:end]
+			c.clearOrphanObjectsVersionOrDeleteMarkers(batch, dest, logger)
+		}(i, end)
+	}
+
+	wg.Wait()
+
+	return nil
+}
+
+func (c *ObjectCleanupController) clearOrphanObjectsVersionOrDeleteMarkers(ods []ObjectVersionOrDeleteMarker, dest *CleanupOrphanObjectsDestination, logger *log.Entry) {
+	for _, od := range ods {
+		c.clearOrphanObjectsVersionOrDeleteMarker(od, dest, logger)
+	}
+}
+
+func (c *ObjectCleanupController) clearOrphanObjectsVersionOrDeleteMarker(od ObjectVersionOrDeleteMarker, dest *CleanupOrphanObjectsDestination, logger *log.Entry) {
+	if od.GetKey() == nil || od.GetLastModified() == nil {
+		logger.Errorf("Ignoring object with missing fields: %v %v", od.GetKey(), od.GetLastModified())
+		return
+	}
+
+	objectKey := *od.GetKey()
+	lastModified := *od.GetLastModified()
+
+	logger = logger.WithFields(log.Fields{
+		"object_key":    objectKey,
+		"last_modified": lastModified,
+	})
+
+	exists, err := c.ObjectRepo.DoesObjectOrTempObjectExist(objectKey)
+	if err != nil {
+		logger.Error(stacktrace.Propagate(err, "Failed to determine if object already exists in DB"))
+		return
+	}
+
+	if exists {
+		return
+	}
+
+	// 2 days ago
+	cutoff := stime.Now().AddDate(0, 0, -2)
+
+	// As a safety check, ignore very recent objects from cleanup
+	if lastModified.After(cutoff) {
+		logger.Warnf("Ignoring too-recent orphan object since it was modified after %v", cutoff)
+		return
+	}
+
+	logger.Infof("Found orphan object %v", od)
+
+	if dest.HasComplianceHold {
+		// Remove compliance hold.
+		err := c.ObjectController.DisableObjectConditionalHold(&dest.Client, *dest.Bucket, objectKey)
+		if err != nil {
+			logger.Error(stacktrace.Propagate(err, "Failed to disable conditional hold on object"))
+			return
+		}
+
+		// Add the object to the cleanup queue with an expiry time that is after
+		// the compliance hold would've passed. Add 2 days of buffer too.
+		expiryDays := s3config.WasabiObjectConditionalHoldDays + 2
+		expiryTime := time.MicrosecondsAfterDays(expiryDays)
+		c.addCleanupEntryForObjectKey(objectKey, dest.DC, expiryTime)
+
+		logger.Infof("Disabled compliance hold and added an entry to cleanup orphan object after %v", expiryTime)
+	} else {
+		// Delete it right away.
+		versionID := od.GetVersionId()
+		logger.Infof("Deleting version '%s'", enteString.EmptyIfNil(versionID))
+		err := c.DeleteObjectVersion(objectKey, versionID, dest)
+		if err != nil {
+			logger.Error(stacktrace.Propagate(err, "Failed to delete object"))
+		}
+
+		c.mOrphanObjectsDeleted.WithLabelValues(dest.DC).Inc()
+	}
+}
+
+// DeleteObjectVersion can be used to delete objects from versioned buckets.
+// +// If we delete an object in a versioning enabled bucket, deletion does not +// actually remove the object and instead creates a delete marker: +// +// - When we delete a file, it creates a delete marker +// - The delete marker becomes the latest version +// - The old version of the file still remains +// +// If we explicitly pass a version ID in the delete call, then the delete marker +// won't get created. +// +// > To delete versioned objects permanently, use `DELETE Object versionId` +// +// https://docs.aws.amazon.com/AmazonS3/latest/userguide/DeletingObjectVersions.html +func (c *ObjectCleanupController) DeleteObjectVersion(objectKey string, versionID *string, dest *CleanupOrphanObjectsDestination) error { + _, err := dest.Client.DeleteObject(&s3.DeleteObjectInput{ + Bucket: dest.Bucket, + Key: &objectKey, + VersionId: versionID, + }) + return stacktrace.Propagate(err, "") +} diff --git a/server/pkg/controller/offer/offer.go b/server/pkg/controller/offer/offer.go new file mode 100644 index 000000000..44f1bce58 --- /dev/null +++ b/server/pkg/controller/offer/offer.go @@ -0,0 +1,116 @@ +package offer + +import ( + "context" + "database/sql" + "encoding/json" + "errors" + "os" + + "github.com/ente-io/museum/pkg/controller/usercache" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/storagebonus" + "github.com/ente-io/museum/pkg/utils/array" + "github.com/ente-io/museum/pkg/utils/billing" + "github.com/ente-io/museum/pkg/utils/config" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +// OfferController controls all offer related operations +type OfferController struct { + BlackFridayOffers ente.BlackFridayOfferPerCountry + UserRepo repo.UserRepository + DiscordController *discord.DiscordController + StorageBonusRepo *storagebonus.Repository + UserCacheCtrl *usercache.Controller +} + +func NewOfferController( + userRepo repo.UserRepository, + discordController *discord.DiscordController, + storageBonusRepo *storagebonus.Repository, + userCacheCtrl *usercache.Controller, +) *OfferController { + blackFridayOffers := make(ente.BlackFridayOfferPerCountry) + path, err := config.BillingConfigFilePath("black-friday.json") + if err != nil { + log.Fatalf("Error getting offer config file: %v", err) + } + data, err := os.ReadFile(path) + if err != nil { + log.Error("Error reading config file", err) + } + err = json.Unmarshal(data, &blackFridayOffers) + if err != nil { + log.Error("Error un-marshalling JSON", err) + } + return &OfferController{ + BlackFridayOffers: blackFridayOffers, + UserRepo: userRepo, + DiscordController: discordController, + StorageBonusRepo: storageBonusRepo, + UserCacheCtrl: userCacheCtrl, + } +} + +func (c *OfferController) GetBlackFridayOffers(countryCode string) []ente.BlackFridayOffer { + if array.StringInList(countryCode, billing.CountriesInEU) { + countryCode = "EU" + } + + if offers, found := c.BlackFridayOffers[countryCode]; found { + return offers + } + // unable to find plans for given country code, return plans for default country + defaultCountry := billing.GetDefaultPlanCountry() + return c.BlackFridayOffers[defaultCountry] +} + +func (c *OfferController) ApplyOffer(email string, productID string) error { + var offerToBeApplied ente.BlackFridayOffer + found := false + for _, offers := range c.BlackFridayOffers { + for _, offer := range offers { + if offer.ID == productID { + 
found = true + offerToBeApplied = offer + } + } + } + if !found { + return stacktrace.Propagate(ente.ErrNotFound, "Could not find an offer for "+productID) + } + var validTill int64 + if offerToBeApplied.Period == ente.Period3Years { + validTill = time.NDaysFromNow(3 * 365) + } else if offerToBeApplied.Period == ente.Period5Years { + validTill = time.NDaysFromNow(5 * 365) + } else { + return stacktrace.Propagate(ente.ErrNotFound, "Could not find a valid time period for "+productID) + } + + userID, err := c.UserRepo.GetUserIDWithEmail(email) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + log.Error("Product purchased with unknown email: " + email) + c.DiscordController.Notify("Unknown user paid " + offerToBeApplied.Price) + return nil + } else { + return stacktrace.Propagate(err, "") + } + } + + err = c.StorageBonusRepo.InsertBFBonus(context.Background(), userID, validTill, offerToBeApplied.Storage) + if err != nil { + c.DiscordController.Notify("Error inserting bonus") + return stacktrace.Propagate(err, "") + } + go c.UserCacheCtrl.GetActiveStorageBonus(context.Background(), userID) + c.DiscordController.NotifyBlackFridayUser(userID, offerToBeApplied.Price) + return nil +} diff --git a/server/pkg/controller/passkeys.go b/server/pkg/controller/passkeys.go new file mode 100644 index 000000000..3cc6031d0 --- /dev/null +++ b/server/pkg/controller/passkeys.go @@ -0,0 +1,84 @@ +package controller + +import ( + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/passkey" + "github.com/go-webauthn/webauthn/protocol" + "github.com/go-webauthn/webauthn/webauthn" + "github.com/google/uuid" +) + +type PasskeyController struct { + Repo *passkey.Repository + UserRepo *repo.UserRepository +} + +func (c *PasskeyController) GetPasskeys(userID int64) (passkeys []ente.Passkey, err error) { + user, err := c.UserRepo.Get(userID) + if err != nil { + return + } + + return c.Repo.GetUserPasskeys(user.ID) +} + +func (c *PasskeyController) DeletePasskey(userID int64, passkeyID uuid.UUID) (err error) { + user, err := c.UserRepo.Get(userID) + if err != nil { + return + } + + return c.Repo.DeletePasskey(&user, passkeyID) +} + +// RemovePasskey2FA removes all the user's passkeys to disable passkey 2FA and fall back to TOTP based 2FA if enabled. 
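+// Deletion stops at the first error; any passkeys not yet deleted are left intact.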
+func (c *PasskeyController) RemovePasskey2FA(userID int64) (err error) {
+	passkeys, err := c.GetPasskeys(userID)
+	if err != nil {
+		return
+	}
+
+	for _, passkey := range passkeys {
+		err = c.DeletePasskey(userID, passkey.ID)
+		if err != nil {
+			return
+		}
+	}
+
+	return
+}
+
+func (c *PasskeyController) RenamePasskey(userID int64, passkeyID uuid.UUID, newName string) (err error) {
+	if len(newName) < 1 || len(newName) > 32 {
+		err = ente.ErrInvalidName
+		return
+	}
+
+	user, err := c.UserRepo.Get(userID)
+	if err != nil {
+		return
+	}
+
+	return c.Repo.RenamePasskey(&user, passkeyID, newName)
+}
+
+func (c *PasskeyController) BeginRegistration(userID int64) (options *protocol.CredentialCreation, session *webauthn.SessionData, sessionID uuid.UUID, err error) {
+	user, err := c.UserRepo.Get(userID)
+	if err != nil {
+		return
+	}
+
+	return c.Repo.CreateBeginRegistrationData(&user)
+}
+
+func (c *PasskeyController) FinishRegistration(userID int64, friendlyName string, req *http.Request, sessionID uuid.UUID) (err error) {
+	user, err := c.UserRepo.Get(userID)
+	if err != nil {
+		return
+	}
+
+	return c.Repo.FinishRegistration(&user, friendlyName, req, sessionID)
+}
diff --git a/server/pkg/controller/playstore.go b/server/pkg/controller/playstore.go
new file mode 100644
index 000000000..49e5774d3
--- /dev/null
+++ b/server/pkg/controller/playstore.go
@@ -0,0 +1,239 @@
+package controller
+
+import (
+	"context"
+	"errors"
+	"github.com/ente-io/museum/pkg/controller/commonbilling"
+	"github.com/ente-io/museum/pkg/repo/storagebonus"
+	"os"
+
+	"github.com/ente-io/stacktrace"
+
+	log "github.com/sirupsen/logrus"
+
+	"github.com/awa/go-iap/playstore"
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/museum/pkg/utils/config"
+	"github.com/ente-io/museum/pkg/utils/email"
+	"google.golang.org/api/androidpublisher/v3"
+)
+
+// PlayStoreController provides abstractions for handling billing on PlayStore
+type PlayStoreController struct {
+	PlayStoreClient        *playstore.Client
+	BillingRepo            *repo.BillingRepository
+	FileRepo               *repo.FileRepository
+	UserRepo               *repo.UserRepository
+	StorageBonusRepo       *storagebonus.Repository
+	BillingPlansPerCountry ente.BillingPlansPerCountry
+	CommonBillCtrl         *commonbilling.Controller
+}
+
+// PlayStorePackageName is the package name of the PlayStore item
+const PlayStorePackageName = "io.ente.photos"
+
+// NewPlayStoreController returns a new instance of PlayStoreController
+func NewPlayStoreController(
+	plans ente.BillingPlansPerCountry,
+	billingRepo *repo.BillingRepository,
+	fileRepo *repo.FileRepository,
+	userRepo *repo.UserRepository,
+	storageBonusRepo *storagebonus.Repository,
+	commonBillCtrl *commonbilling.Controller,
+) *PlayStoreController {
+	playStoreClient, err := newPlayStoreClient()
+	if err != nil {
+		log.Fatal(err)
+	}
+	// We don't do nil checks for playStoreClient in the definitions of these
+	// methods - if they're getting called, that means we're not in a test
+	// environment and so playStoreClient really should've been there.
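+	// (newPlayStoreClient returns a nil client when the credentials file is absent, e.g. when running locally.)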
+ + return &PlayStoreController{ + PlayStoreClient: playStoreClient, + BillingRepo: billingRepo, + FileRepo: fileRepo, + UserRepo: userRepo, + BillingPlansPerCountry: plans, + StorageBonusRepo: storageBonusRepo, + CommonBillCtrl: commonBillCtrl, + } +} + +func newPlayStoreClient() (*playstore.Client, error) { + playStoreCredentialsFile, err := config.CredentialFilePath("pst-service-account.json") + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if playStoreCredentialsFile == "" { + // Can happen when running locally + return nil, nil + } + + jsonKey, err := os.ReadFile(playStoreCredentialsFile) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + playStoreClient, err := playstore.New(jsonKey) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + return playStoreClient, nil +} + +// HandleNotification handles a PlayStore notification +func (c *PlayStoreController) HandleNotification(notification playstore.DeveloperNotification) error { + transactionID := notification.SubscriptionNotification.PurchaseToken + productID := notification.SubscriptionNotification.SubscriptionID + purchase, err := c.verifySubscription(productID, transactionID) + if err != nil { + return stacktrace.Propagate(err, "") + } + originalTransactionID := transactionID + if purchase.LinkedPurchaseToken != "" { + originalTransactionID = purchase.LinkedPurchaseToken + } + subscription, err := c.BillingRepo.GetSubscriptionForTransaction(originalTransactionID, ente.PlayStore) + if err != nil { + // First subscription, no user to link to + log.Warn("Could not find transaction against " + originalTransactionID) + log.Error(err) + return nil + } + switch notification.SubscriptionNotification.NotificationType { + case playstore.SubscriptionNotificationTypeExpired: + user, err := c.UserRepo.Get(subscription.UserID) + if err != nil { + if errors.Is(err, ente.ErrUserDeleted) { + // no-op user has already been deleted + return nil + } + return stacktrace.Propagate(err, "") + } + // send deletion email for folks who are either on individual plan or admin of a family plan + if user.FamilyAdminID == nil || *user.FamilyAdminID == subscription.UserID { + storage, surpErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), subscription.UserID) + if surpErr != nil { + return stacktrace.Propagate(surpErr, "") + } + if storage == nil || *storage <= 0 { + err = email.SendTemplatedEmail([]string{user.Email}, "ente", "support@ente.io", + ente.SubscriptionEndedEmailSubject, + ente.SubscriptionEndedEmailTemplate, map[string]interface{}{}, nil) + if err != nil { + return stacktrace.Propagate(err, "") + } + } else { + log.WithField("storage", storage).Info("User has surplus storage, not sending email") + } + } + // TODO: Add cron to delete files of users with expired subscriptions + case playstore.SubscriptionNotificationTypeAccountHold: + user, err := c.UserRepo.Get(subscription.UserID) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = email.SendTemplatedEmail([]string{user.Email}, "ente", "support@ente.io", + ente.AccountOnHoldEmailSubject, + ente.OnHoldTemplate, map[string]interface{}{ + "PaymentProvider": "PlayStore", + }, nil) + if err != nil { + return stacktrace.Propagate(err, "") + } + case playstore.SubscriptionNotificationTypeCanceled: + err := c.BillingRepo.UpdateSubscriptionCancellationStatus(subscription.UserID, true) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + if transactionID != originalTransactionID { // Upgrade, Downgrade or 
Resubscription + var newPlan ente.BillingPlan + plans := c.BillingPlansPerCountry["EU"] // Country code is irrelevant since Storage will be the same for a given subscriptionID + for _, plan := range plans { + if plan.AndroidID == productID { + newPlan = plan + break + } + } + if newPlan.Storage < subscription.Storage { // Downgrade + canDowngrade, canDowngradeErr := c.CommonBillCtrl.CanDowngradeToGivenStorage(newPlan.Storage, subscription.UserID) + if canDowngradeErr != nil { + return stacktrace.Propagate(canDowngradeErr, "") + } + if !canDowngrade { + return stacktrace.Propagate(ente.ErrCannotDowngrade, "") + } + log.Info("Usage is good") + } + newSubscription := ente.Subscription{ + Storage: newPlan.Storage, + ExpiryTime: purchase.ExpiryTimeMillis * 1000, + ProductID: productID, + PaymentProvider: ente.AppStore, + OriginalTransactionID: originalTransactionID, + Attributes: ente.SubscriptionAttributes{LatestVerificationData: transactionID}, + } + err = c.BillingRepo.ReplaceSubscription( + subscription.ID, + newSubscription, + ) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = c.AcknowledgeSubscription(productID, transactionID) + if err != nil { + return stacktrace.Propagate(err, "") + } + } else { + err = c.BillingRepo.UpdateSubscriptionExpiryTime( + subscription.ID, purchase.ExpiryTimeMillis*1000) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + return c.BillingRepo.LogPlayStorePush(subscription.UserID, notification, *purchase) +} + +// GetVerifiedSubscription verifies and returns the verified subscription +func (c *PlayStoreController) GetVerifiedSubscription(userID int64, productID string, verificationData string) (ente.Subscription, error) { + var s ente.Subscription + s.UserID = userID + s.ProductID = productID + s.PaymentProvider = ente.PlayStore + s.Attributes.LatestVerificationData = verificationData + plans := c.BillingPlansPerCountry["EU"] // Country code is irrelevant since Storage will be the same for a given subscriptionID + response, err := c.verifySubscription(productID, verificationData) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + for _, plan := range plans { + if plan.AndroidID == productID { + s.Storage = plan.Storage + break + } + } + s.OriginalTransactionID = verificationData + s.ExpiryTime = response.ExpiryTimeMillis * 1000 + return s, nil +} + +// AcknowledgeSubscription acknowledges a subscription to PlayStore +func (c *PlayStoreController) AcknowledgeSubscription(subscriptionID string, token string) error { + req := &androidpublisher.SubscriptionPurchasesAcknowledgeRequest{} + context := context.Background() + return c.PlayStoreClient.AcknowledgeSubscription(context, PlayStorePackageName, subscriptionID, token, req) +} + +// CancelSubscription cancels a PlayStore subscription +func (c *PlayStoreController) CancelSubscription(subscriptionID string, verificationData string) error { + context := context.Background() + return c.PlayStoreClient.CancelSubscription(context, PlayStorePackageName, subscriptionID, verificationData) +} + +func (c *PlayStoreController) verifySubscription(subscriptionID string, verificationData string) (*androidpublisher.SubscriptionPurchase, error) { + context := context.Background() + return c.PlayStoreClient.VerifySubscription(context, PlayStorePackageName, subscriptionID, verificationData) +} diff --git a/server/pkg/controller/public_collection.go b/server/pkg/controller/public_collection.go new file mode 100644 index 000000000..76a604937 --- /dev/null +++ 
b/server/pkg/controller/public_collection.go @@ -0,0 +1,338 @@ +package controller + +import ( + "context" + "errors" + "fmt" + + "github.com/ente-io/museum/ente" + enteJWT "github.com/ente-io/museum/ente/jwt" + emailCtrl "github.com/ente-io/museum/pkg/controller/email" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt" + "github.com/lithammer/shortuuid/v3" + "github.com/sirupsen/logrus" +) + +var AllowedReasons = map[string]string{ + "COPYRIGHT": "Copyright Infringement", + "MALICIOUS_CONTENT": "Malicious Content", +} + +const ( + AccessTokenLength = 8 + // AutoDisableAbuseThreshold indicates minimum number of abuse reports post which the access token is + // automatically disabled + AutoDisableAbuseThreshold = 3 + + // DeviceLimitThreshold represents number of unique devices which can access a shared collection. (ip + user agent) + // is treated as unique device + DeviceLimitThreshold = 50 + + DeviceLimitThresholdMultiplier = 10 + + DeviceLimitWarningThreshold = 2000 + + AbuseAlertSubject = "[Alert] Abuse report received against your album on ente" + + AbuseAlertTeamSubject = "Abuse report received" + + AbuseLimitExceededSubject = "[Alert] Too many abuse reports received against your album on ente" + + AbuseAlertTemplate = "report_alert.html" + + AbuseLimitExceededTemplate = "report_limit_exceeded_alert.html" +) + +// PublicCollectionController controls share collection operations +type PublicCollectionController struct { + FileController *FileController + EmailNotificationCtrl *emailCtrl.EmailNotificationController + PublicCollectionRepo *repo.PublicCollectionRepository + CollectionRepo *repo.CollectionRepository + UserRepo *repo.UserRepository + JwtSecret []byte +} + +func (c *PublicCollectionController) CreateAccessToken(ctx context.Context, req ente.CreatePublicAccessTokenRequest) (ente.PublicURL, error) { + accessToken := shortuuid.New()[0:AccessTokenLength] + err := c.PublicCollectionRepo.Insert(ctx, req.CollectionID, accessToken, req.ValidTill, req.DeviceLimit, req.EnableCollect) + if err != nil { + if errors.Is(err, ente.ErrActiveLinkAlreadyExists) { + collectionToPubUrlMap, err2 := c.PublicCollectionRepo.GetCollectionToActivePublicURLMap(ctx, []int64{req.CollectionID}) + if err2 != nil { + return ente.PublicURL{}, stacktrace.Propagate(err2, "") + } + if publicUrls, ok := collectionToPubUrlMap[req.CollectionID]; ok { + if len(publicUrls) > 0 { + return publicUrls[0], nil + } + } + // ideally we should never reach here + return ente.PublicURL{}, stacktrace.NewError("Unexpected state") + } else { + return ente.PublicURL{}, stacktrace.Propagate(err, "") + } + } + response := ente.PublicURL{ + URL: fmt.Sprintf(repo.BaseShareURL, accessToken), + ValidTill: req.ValidTill, + DeviceLimit: req.DeviceLimit, + EnableDownload: true, + EnableCollect: req.EnableCollect, + PasswordEnabled: false, + } + return response, nil +} + +func (c *PublicCollectionController) CreateFile(ctx *gin.Context, file ente.File, app ente.App) (ente.File, error) { + collection, err := c.GetPublicCollection(ctx, true) + if err != nil { + return ente.File{}, stacktrace.Propagate(err, "") + } + collectionOwnerID := collection.Owner.ID + // Do not let any update happen via public Url + file.ID = 0 + file.OwnerID = collectionOwnerID + file.UpdationTime = time.Microseconds() + file.IsDeleted = false + 
createdFile, err := c.FileController.Create(ctx, collectionOwnerID, file, ctx.Request.UserAgent(), app) + if err != nil { + return ente.File{}, stacktrace.Propagate(err, "") + } + + // Note: Stop sending email notification for public collection till + // we add in-app setting to enable/disable email notifications + //go c.EmailNotificationCtrl.OnFilesCollected(file.OwnerID) + return createdFile, nil +} + +// Disable all public accessTokens generated for the given cID till date. +func (c *PublicCollectionController) Disable(ctx context.Context, cID int64) error { + err := c.PublicCollectionRepo.DisableSharing(ctx, cID) + return stacktrace.Propagate(err, "") +} + +func (c *PublicCollectionController) UpdateSharedUrl(ctx context.Context, req ente.UpdatePublicAccessTokenRequest) (ente.PublicURL, error) { + publicCollectionToken, err := c.PublicCollectionRepo.GetActivePublicCollectionToken(ctx, req.CollectionID) + if err != nil { + return ente.PublicURL{}, err + } + if req.ValidTill != nil { + publicCollectionToken.ValidTill = *req.ValidTill + } + if req.DeviceLimit != nil { + publicCollectionToken.DeviceLimit = *req.DeviceLimit + } + if req.PassHash != nil && req.Nonce != nil && req.OpsLimit != nil && req.MemLimit != nil { + publicCollectionToken.PassHash = req.PassHash + publicCollectionToken.Nonce = req.Nonce + publicCollectionToken.OpsLimit = req.OpsLimit + publicCollectionToken.MemLimit = req.MemLimit + } else if req.DisablePassword != nil && *req.DisablePassword { + publicCollectionToken.PassHash = nil + publicCollectionToken.Nonce = nil + publicCollectionToken.OpsLimit = nil + publicCollectionToken.MemLimit = nil + } + if req.EnableDownload != nil { + publicCollectionToken.EnableDownload = *req.EnableDownload + } + if req.EnableCollect != nil { + publicCollectionToken.EnableCollect = *req.EnableCollect + } + err = c.PublicCollectionRepo.UpdatePublicCollectionToken(ctx, publicCollectionToken) + if err != nil { + return ente.PublicURL{}, stacktrace.Propagate(err, "") + } + return ente.PublicURL{ + URL: fmt.Sprintf(repo.BaseShareURL, publicCollectionToken.Token), + DeviceLimit: publicCollectionToken.DeviceLimit, + ValidTill: publicCollectionToken.ValidTill, + EnableDownload: publicCollectionToken.EnableDownload, + EnableCollect: publicCollectionToken.EnableCollect, + PasswordEnabled: publicCollectionToken.PassHash != nil && *publicCollectionToken.PassHash != "", + Nonce: publicCollectionToken.Nonce, + MemLimit: publicCollectionToken.MemLimit, + OpsLimit: publicCollectionToken.OpsLimit, + }, nil +} + +// VerifyPassword verifies if the user has provided correct pw hash. If yes, it returns a signed jwt token which can be +// used by the client to pass in other requests for public collection. +// Having a separate endpoint for password validation allows us to easily rate-limit the attempts for brute-force +// attack for guessing password. 
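+// The issued token embeds the supplied password hash and expires after 365 days.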
+func (c *PublicCollectionController) VerifyPassword(ctx *gin.Context, req ente.VerifyPasswordRequest) (*ente.VerifyPasswordResponse, error) {
+	accessContext := auth.MustGetPublicAccessContext(ctx)
+	publicCollectionToken, err := c.PublicCollectionRepo.GetActivePublicCollectionToken(ctx, accessContext.CollectionID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to get public collection info")
+	}
+	if publicCollectionToken.PassHash == nil || *publicCollectionToken.PassHash == "" {
+		return nil, stacktrace.Propagate(ente.ErrBadRequest, "password is not configured for the link")
+	}
+	if req.PassHash != *publicCollectionToken.PassHash {
+		return nil, stacktrace.Propagate(ente.ErrInvalidPassword, "incorrect password for link")
+	}
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, &enteJWT.PublicAlbumPasswordClaim{
+		PassHash:   req.PassHash,
+		ExpiryTime: time.NDaysFromNow(365),
+	})
+	// Sign and get the complete encoded token as a string using the secret
+	tokenString, err := token.SignedString(c.JwtSecret)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return &ente.VerifyPasswordResponse{
+		JWTToken: tokenString,
+	}, nil
+}
+
+func (c *PublicCollectionController) ValidateJWTToken(ctx *gin.Context, jwtToken string, passwordHash string) error {
+	token, err := jwt.ParseWithClaims(jwtToken, &enteJWT.PublicAlbumPasswordClaim{}, func(token *jwt.Token) (interface{}, error) {
+		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, stacktrace.Propagate(fmt.Errorf("unexpected signing method: %v", token.Header["alg"]), "")
+		}
+		return c.JwtSecret, nil
+	})
+	if err != nil {
+		return stacktrace.Propagate(err, "JWT parsing failed")
+	}
+	claims, ok := token.Claims.(*enteJWT.PublicAlbumPasswordClaim)
+	if !ok {
+		return stacktrace.Propagate(errors.New("no claim in jwt token"), "")
+	}
+	if token.Valid && claims.PassHash == passwordHash {
+		return nil
+	}
+	return ente.ErrInvalidPassword
+}
+
+// ReportAbuse captures an abuse report for a publicly shared collection.
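+// Only reasons listed in AllowedReasons are accepted; other reasons are rejected as a bad request.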
+// It will also disable the accessToken for the collection if total abuse reports for the said collection +// reaches AutoDisableAbuseThreshold +func (c *PublicCollectionController) ReportAbuse(ctx *gin.Context, req ente.AbuseReportRequest) error { + accessContext := auth.MustGetPublicAccessContext(ctx) + readableReason, found := AllowedReasons[req.Reason] + if !found { + return stacktrace.Propagate(ente.ErrBadRequest, fmt.Sprintf("unexpected reason %s", req.Reason)) + } + logrus.WithField("collectionID", accessContext.CollectionID).Error("CRITICAL: received abuse report") + + err := c.PublicCollectionRepo.RecordAbuseReport(ctx, accessContext, req.URL, req.Reason, req.Details) + if err != nil { + return stacktrace.Propagate(err, "") + } + count, err := c.PublicCollectionRepo.GetAbuseReportCount(ctx, accessContext) + if err != nil { + return stacktrace.Propagate(err, "") + } + c.onAbuseReportReceived(accessContext.CollectionID, req, readableReason, count) + if count >= AutoDisableAbuseThreshold { + logrus.WithFields(logrus.Fields{ + "collectionID": accessContext.CollectionID, + }).Warn("disabling accessTokens for shared collection due to multiple abuse reports") + return stacktrace.Propagate(c.Disable(ctx, accessContext.CollectionID), "") + } + return nil +} + +func (c *PublicCollectionController) onAbuseReportReceived(collectionID int64, report ente.AbuseReportRequest, readableReason string, abuseCount int64) { + collection, err := c.CollectionRepo.Get(collectionID) + if err != nil { + logrus.Error("Could not get collection for abuse report") + return + } + user, err := c.UserRepo.Get(collection.Owner.ID) + if err != nil { + logrus.Error("Could not get owner for abuse report") + return + } + comment := report.Details.Comment + if comment == "" { + comment = "None" + } + err = email.SendTemplatedEmail([]string{user.Email}, "abuse@ente.io", "abuse@ente.io", AbuseAlertSubject, AbuseAlertTemplate, map[string]interface{}{ + "AlbumLink": report.URL, + "Reason": readableReason, + "Comments": comment, + }, nil) + if err != nil { + logrus.Error("Error sending abuse notification ", err) + } + if abuseCount >= AutoDisableAbuseThreshold { + err = email.SendTemplatedEmail([]string{user.Email}, "abuse@ente.io", "abuse@ente.io", AbuseLimitExceededSubject, AbuseLimitExceededTemplate, nil, nil) + if err != nil { + logrus.Error("Error sending abuse limit exceeded notification ", err) + } + } + err = email.SendTemplatedEmail([]string{"team@ente.io"}, "abuse@ente.io", "abuse@ente.io", AbuseAlertTeamSubject, AbuseAlertTemplate, map[string]interface{}{ + "AlbumLink": report.URL, + "Reason": readableReason, + "Comments": comment, + }, nil) + if err != nil { + logrus.Error("Error notifying team about abuse ", err) + } +} + +func (c *PublicCollectionController) HandleAccountDeletion(ctx context.Context, userID int64, logger *logrus.Entry) error { + logger.Info("updating public collection on account deletion") + collectionIDs, err := c.PublicCollectionRepo.GetActivePublicTokenForUser(ctx, userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + logger.WithField("cIDs", collectionIDs).Info("disable public tokens due to account deletion") + for _, collectionID := range collectionIDs { + err = c.Disable(ctx, collectionID) + if err != nil { + return stacktrace.Propagate(err, "") + } + } + return nil +} + +// GetPublicCollection will return collection info for a public url. 
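+// Sensitive fields (sharees, magic metadata and password internals) are stripped from the response.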
+// If mustAllowCollect is set to true but the underlying collection doesn't
+// allow uploading, it returns an error.
+func (c *PublicCollectionController) GetPublicCollection(ctx *gin.Context, mustAllowCollect bool) (ente.Collection, error) {
+	accessContext := auth.MustGetPublicAccessContext(ctx)
+	collection, err := c.CollectionRepo.Get(accessContext.CollectionID)
+	if err != nil {
+		return ente.Collection{}, stacktrace.Propagate(err, "")
+	}
+	if collection.IsDeleted {
+		return ente.Collection{}, stacktrace.Propagate(ente.ErrNotFound, "collection is deleted")
+	}
+	// hide redundant/private information
+	collection.Sharees = nil
+	collection.MagicMetadata = nil
+	publicURLsWithLimitedInfo := make([]ente.PublicURL, 0)
+	for _, publicUrl := range collection.PublicURLs {
+		publicURLsWithLimitedInfo = append(publicURLsWithLimitedInfo, ente.PublicURL{
+			EnableDownload:  publicUrl.EnableDownload,
+			EnableCollect:   publicUrl.EnableCollect,
+			PasswordEnabled: publicUrl.PasswordEnabled,
+			Nonce:           publicUrl.Nonce,
+			MemLimit:        publicUrl.MemLimit,
+			OpsLimit:        publicUrl.OpsLimit,
+		})
+	}
+	collection.PublicURLs = publicURLsWithLimitedInfo
+	if mustAllowCollect {
+		if len(publicURLsWithLimitedInfo) != 1 {
+			errorMsg := fmt.Sprintf("Unexpected number of public urls: %d", len(publicURLsWithLimitedInfo))
+			return ente.Collection{}, stacktrace.Propagate(ente.NewInternalError(errorMsg), "")
+		}
+		if !publicURLsWithLimitedInfo[0].EnableCollect {
+			return ente.Collection{}, stacktrace.Propagate(&ente.ErrPublicCollectDisabled, "")
+		}
+	}
+	return collection, nil
+}
diff --git a/server/pkg/controller/push.go b/server/pkg/controller/push.go
new file mode 100644
index 000000000..e07a32c87
--- /dev/null
+++ b/server/pkg/controller/push.go
@@ -0,0 +1,188 @@
+package controller
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	firebase "firebase.google.com/go"
+	"firebase.google.com/go/messaging"
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/museum/pkg/utils/config"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+	"google.golang.org/api/option"
+)
+
+// PushController controls all push-related operations
+type PushController struct {
+	PushRepo       *repo.PushTokenRepository
+	TaskLockRepo   *repo.TaskLockRepository
+	HostName       string
+	FirebaseClient *messaging.Client
+}
+
+type PushToken struct {
+	UserID         int64
+	FCMToken       *string
+	APNSToken      *string
+	CreatedAt      int64
+	UpdatedAt      int64
+	LastNotifiedAt int64
+}
+
+// Minimum interval that must elapse since a device's last push before it is notified again
+const pushIntervalInMinutes = 60
+
+// Limit defined by FirebaseClient.SendAll(...)
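+// (FCM caps a single batch send at 500 messages, hence this limit.)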
+const concurrentPushesInOneShot = 500 + +const taskLockName = "fcm-push-lock" + +const taskLockDurationInMinutes = 5 + +// As proposed by https://firebase.google.com/docs/cloud-messaging/manage-tokens#ensuring-registration-token-freshness +const tokenExpiryDurationInDays = 61 + +func NewPushController(pushRepo *repo.PushTokenRepository, taskLockRepo *repo.TaskLockRepository, hostName string) *PushController { + client, err := newFirebaseClient() + if err != nil { + log.Error(fmt.Errorf("error creating Firebase client: %v", err)) + } + return &PushController{PushRepo: pushRepo, TaskLockRepo: taskLockRepo, HostName: hostName, FirebaseClient: client} +} + +func newFirebaseClient() (*messaging.Client, error) { + firebaseCredentialsFile, err := config.CredentialFilePath("fcm-service-account.json") + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + if firebaseCredentialsFile == "" { + // Can happen when running locally + return nil, nil + } + + opt := option.WithCredentialsFile(firebaseCredentialsFile) + app, err := firebase.NewApp(context.Background(), nil, opt) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + client, err := app.Messaging(context.Background()) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + return client, nil +} + +func (c *PushController) AddToken(userID int64, token ente.PushTokenRequest) error { + return stacktrace.Propagate(c.PushRepo.AddToken(userID, token), "") +} + +func (c *PushController) RemoveTokensForUser(userID int64) error { + return stacktrace.Propagate(c.PushRepo.RemoveTokensForUser(userID), "") +} + +func (c *PushController) SendPushes() { + lockStatus, err := c.TaskLockRepo.AcquireLock(taskLockName, + time.MicrosecondsAfterMinutes(taskLockDurationInMinutes), c.HostName) + if err != nil { + log.Error("Unable to acquire lock to send pushes", err) + return + } + if !lockStatus { + log.Info("Skipping sending pushes since there is an existing lock to send pushes") + return + } + defer c.releaseTaskLock() + + tokens, err := c.PushRepo.GetTokensToBeNotified(time.MicrosecondsBeforeMinutes(pushIntervalInMinutes), + concurrentPushesInOneShot) + if err != nil { + log.Error(fmt.Errorf("error fetching tokens to be notified: %v", err)) + return + } + + err = c.sendFCMPushes(tokens, map[string]string{"action": "sync"}) + if err != nil { + log.Error(fmt.Errorf("error sending pushes: %v", err)) + return + } + + c.updateLastNotificationTime(tokens) +} + +func (c *PushController) ClearExpiredTokens() { + err := c.PushRepo.RemoveTokensOlderThan(time.NDaysFromNow(-1 * tokenExpiryDurationInDays)) + if err != nil { + log.Errorf("Error while removing older tokens %s", err) + } else { + log.Info("Cleared expired FCM tokens") + } +} + +func (c *PushController) releaseTaskLock() { + err := c.TaskLockRepo.ReleaseLock(taskLockName) + if err != nil { + log.Errorf("Error while releasing lock %s", err) + } +} + +func (c *PushController) updateLastNotificationTime(pushTokens []ente.PushToken) { + err := c.PushRepo.SetLastNotificationTimeToNow(pushTokens) + if err != nil { + log.Error(fmt.Errorf("error updating last notified at times: %v", err)) + } +} + +func (c *PushController) sendFCMPushes(pushTokens []ente.PushToken, payload map[string]string) error { + firebaseClient := c.FirebaseClient + silent := viper.GetBool("internal.silent") + if silent || firebaseClient == nil { + if len(pushTokens) > 0 { + log.Info("Skipping sending pushes to " + strconv.Itoa(len(pushTokens)) + " devices") + } + return nil + } + + log.Info("Sending pushes 
to " + strconv.Itoa(len(pushTokens)) + " devices") + if len(pushTokens) == 0 { + return nil + } + if len(pushTokens) > concurrentPushesInOneShot { + return errors.New("cannot send these many pushes in one shot") + } + marshal, _ := json.Marshal(pushTokens) + log.WithField("devices", string(marshal)).Info("push to following devices") + fcmTokens := make([]string, 0) + for _, pushTokenData := range pushTokens { + fcmTokens = append(fcmTokens, pushTokenData.FCMToken) + } + + message := &messaging.MulticastMessage{ + Tokens: fcmTokens, + Data: payload, + Android: &messaging.AndroidConfig{Priority: "high"}, + APNS: &messaging.APNSConfig{ + Headers: map[string]string{ + "apns-push-type": "background", + "apns-priority": "5", // Must be `5` when `contentAvailable` is set to true. + "apns-topic": "io.ente.frame", // bundle identifier + }, + Payload: &messaging.APNSPayload{Aps: &messaging.Aps{ContentAvailable: true}}, + }, + } + result, err := firebaseClient.SendMulticast(context.Background(), message) + if err != nil { + return stacktrace.Propagate(err, "Error sending pushes") + } else { + log.Info("Send push result: success count: " + strconv.Itoa(result.SuccessCount) + + ", failure count: " + strconv.Itoa(result.FailureCount)) + return nil + } +} diff --git a/server/pkg/controller/remotestore/controller.go b/server/pkg/controller/remotestore/controller.go new file mode 100644 index 000000000..d41bf7e5f --- /dev/null +++ b/server/pkg/controller/remotestore/controller.go @@ -0,0 +1,36 @@ +package remotestore + +import ( + "database/sql" + "errors" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/repo/remotestore" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +// Controller is interface for exposing business logic related to for remote store +type Controller struct { + Repo *remotestore.Repository +} + +// Insert of update the key's value +func (c *Controller) InsertOrUpdate(ctx *gin.Context, request ente.UpdateKeyValueRequest) error { + userID := auth.GetUserID(ctx.Request.Header) + return c.Repo.InsertOrUpdate(ctx, userID, request.Key, request.Value) +} + +func (c *Controller) Get(ctx *gin.Context, req ente.GetValueRequest) (*ente.GetValueResponse, error) { + userID := auth.GetUserID(ctx.Request.Header) + value, err := c.Repo.GetValue(ctx, userID, req.Key) + if err != nil { + if errors.Is(err, sql.ErrNoRows) && req.DefaultValue != nil { + return &ente.GetValueResponse{Value: *req.DefaultValue}, nil + } else { + return nil, stacktrace.Propagate(err, "") + } + } + return &ente.GetValueResponse{Value: value}, nil +} diff --git a/server/pkg/controller/replication3.go b/server/pkg/controller/replication3.go new file mode 100644 index 000000000..ec949cf4c --- /dev/null +++ b/server/pkg/controller/replication3.go @@ -0,0 +1,625 @@ +package controller + +import ( + "database/sql" + "encoding/base64" + "errors" + "fmt" + "io" + "net/http" + "os" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/file" + "github.com/ente-io/museum/pkg/utils/s3config" + "github.com/ente-io/stacktrace" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +// ReplicationController3 oversees 
version 3 of our object replication. +// +// The user's encrypted data starts off in 1 hot storage (Backblaze "b2"). This +// controller then takes over and replicates it the other two replicas. It keeps +// state in the object_copies table. +// +// Both v2 and v3 of object replication use the same hot storage (b2), but they +// replicate to different buckets thereafter. +// +// The current implementation only works if the hot storage is b2. This is not +// an inherent limitation, however the code has not yet been tested in other +// scenarios, so there is a safety check preventing the replication from +// happening if the current hot storage is not b2. +type ReplicationController3 struct { + S3Config *s3config.S3Config + ObjectRepo *repo.ObjectRepository + ObjectCopiesRepo *repo.ObjectCopiesRepository + DiscordController *discord.DiscordController + // URL of the Cloudflare worker to use for downloading the source object + workerURL string + // Base directory for temporary storage + tempStorage string + // Prometheus Metrics + mUploadSuccess *prometheus.CounterVec + mUploadFailure *prometheus.CounterVec + // Cached S3 clients etc + b2Client *s3.S3 + b2Bucket *string + wasabiDest *UploadDestination + scwDest *UploadDestination +} + +type UploadDestination struct { + DC string + Client *s3.S3 + Uploader *s3manager.Uploader + Bucket *string + // The label to use for reporting metrics for uploads to this destination + Label string + // If true, we should ignore Wasabi 403 errors. See "Reuploads". + HasComplianceHold bool + // If true, the object is uploaded to the GLACIER class. + IsGlacier bool +} + +// StartReplication starts the background replication process. +// +// This method returns synchronously. ReplicationController3 will create +// suitable number of goroutines to parallelize and perform the replication +// asynchronously, as and when it notices new files that have not yet been +// replicated (it does this by querying the object_copies table). +func (c *ReplicationController3) StartReplication() error { + // As a safety check, ensure that the current hot storage bucket is in b2. + // This is because the replication v3 code has not yet been tested for other + // scenarios (it'll likely work though, probably with minor modifications). + hotDC := c.S3Config.GetHotDataCenter() + if hotDC != c.S3Config.GetHotBackblazeDC() { + return fmt.Errorf("v3 replication can currently only run when the primary hot data center is Backblaze. 
Instead, it was %s", hotDC) + } + + workerURL := viper.GetString("replication.worker-url") + if workerURL == "" { + return fmt.Errorf("replication.worker-url was not defined") + } + c.workerURL = workerURL + log.Infof("Worker URL to download objects for replication v3 is: %s", workerURL) + + c.createMetrics() + err := c.createTemporaryStorage() + if err != nil { + return err + } + c.createDestinations() + + workerCount := viper.GetInt("replication.worker-count") + if workerCount == 0 { + workerCount = 6 + } + + go c.startWorkers(workerCount) + + return nil +} + +func (c *ReplicationController3) startWorkers(n int) { + log.Infof("Starting %d workers for replication v3", n) + + for i := 0; i < n; i++ { + go c.replicate(i) + // Stagger the workers + time.Sleep(time.Duration(2*i+1) * time.Second) + } +} + +func (c *ReplicationController3) createMetrics() { + c.mUploadSuccess = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "museum_replication_upload_success_total", + Help: "Number of successful uploads during replication (each replica is counted separately)", + }, []string{"destination"}) + c.mUploadFailure = promauto.NewCounterVec(prometheus.CounterOpts{ + Name: "museum_replication_upload_failure_total", + Help: "Number of failed uploads during replication (each replica is counted separately)", + }, []string{"destination"}) +} + +func (c *ReplicationController3) createTemporaryStorage() error { + tempStorage := viper.GetString("replication.tmp-storage") + if tempStorage == "" { + tempStorage = "tmp/replication" + } + + log.Infof("Temporary storage for replication v3 is: %s", tempStorage) + + err := file.DeleteAllFilesInDirectory(tempStorage) + if err != nil { + return stacktrace.Propagate(err, "Failed to deleting old files from %s", tempStorage) + } + + err = file.MakeDirectoryIfNotExists(tempStorage) + if err != nil { + return stacktrace.Propagate(err, "Failed to create temporary storage %s", tempStorage) + } + + c.tempStorage = tempStorage + + return nil +} + +func (c *ReplicationController3) createDestinations() { + // The s3manager.Uploader objects are safe for use concurrently. From the + // AWS docs: + // + // > The Uploader structure that calls Upload(). It is safe to call Upload() + // on this structure for multiple objects and across concurrent goroutines. + // Mutating the Uploader's properties is not safe to be done concurrently. + + config := c.S3Config + + b2DC := config.GetHotBackblazeDC() + b2Client := config.GetS3Client(b2DC) + c.b2Client = &b2Client + c.b2Bucket = config.GetBucket(b2DC) + + wasabiDC := config.GetHotWasabiDC() + wasabiClient := config.GetS3Client(wasabiDC) + c.wasabiDest = &UploadDestination{ + DC: wasabiDC, + Client: &wasabiClient, + Uploader: s3manager.NewUploaderWithClient(&wasabiClient), + Bucket: config.GetBucket(wasabiDC), + Label: "wasabi", + HasComplianceHold: config.WasabiComplianceDC() == wasabiDC, + } + + scwDC := config.GetColdScalewayDC() + scwClient := config.GetS3Client(scwDC) + c.scwDest = &UploadDestination{ + DC: scwDC, + Client: &scwClient, + Uploader: s3manager.NewUploaderWithClient(&scwClient), + Bucket: config.GetBucket(scwDC), + Label: "scaleway", + // should be true, except when running in a local cluster (since minio doesn't + // support specifying the GLACIER storage class). + IsGlacier: !config.AreLocalBuckets(), + } +} + +// Entry point for the replication worker (goroutine) +// +// i is an arbitrary index of the current routine. 
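+// It is only used to stagger retry sleeps so that the workers don't all wake up in lockstep.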
+func (c *ReplicationController3) replicate(i int) { + // This is just + // + // while (true) { replicate() } + // + // but with an extra sleep for a bit if nothing got replicated - both when + // something's wrong, or there's nothing to do. + for { + err := c.tryReplicate() + if err != nil { + // Sleep in proportion to the (arbitrary) index to space out the + // workers further. + time.Sleep(time.Duration(i+1) * time.Minute) + } + } +} + +// Try to replicate an object. +// +// Return nil if something was replicated, otherwise return the error. +// +// A common and expected error is `sql.ErrNoRows`, which occurs if there are no +// objects left to replicate currently. +func (c *ReplicationController3) tryReplicate() error { + // Fetch an object to replicate + tx, copies, err := c.ObjectCopiesRepo.GetAndLockUnreplicatedObject() + if err != nil { + if !errors.Is(err, sql.ErrNoRows) { + log.Errorf("Could not fetch an object to replicate: %s", err) + } + return stacktrace.Propagate(err, "") + } + + objectKey := copies.ObjectKey + + logger := log.WithFields(log.Fields{ + "task": "replication", + "object_key": objectKey, + }) + + commit := func(err error) error { + // We don't rollback the transaction even in the case of errors, and + // instead try to commit it after setting the last_attempt timestamp. + // + // This avoids the replication getting stuck in a loop trying (and + // failing) to replicate the same object. The error would still need to + // be resolved, but at least the replication would meanwhile move + // forward, ignoring this row. + + if err != nil { + logger.Error(err) + } + + aerr := c.ObjectCopiesRepo.RegisterReplicationAttempt(tx, objectKey) + if aerr != nil { + aerr = stacktrace.Propagate(aerr, "Failed to mark replication attempt") + logger.Error(aerr) + } + + cerr := tx.Commit() + if cerr != nil { + cerr = stacktrace.Propagate(err, "Failed to commit transaction") + logger.Error(cerr) + } + + if err == nil { + err = aerr + } + if err == nil { + err = cerr + } + + if err == nil { + logger.Info("Replication attempt succeeded") + } else { + logger.Info("Replication attempt failed") + } + + return err + } + + logger.Info("Replication attempt start") + + if copies.B2 == nil { + err := errors.New("expected B2 copy to be in place before we start replication") + return commit(stacktrace.Propagate(err, "Sanity check failed")) + } + + if !copies.WantWasabi && !copies.WantSCW { + err := errors.New("expected at least one of want_wasabi and want_scw to be true when trying to replicate") + return commit(stacktrace.Propagate(err, "Sanity check failed")) + } + + ob, err := c.ObjectRepo.GetObjectState(tx, objectKey) + if err != nil { + return commit(stacktrace.Propagate(err, "Failed to fetch file's deleted status")) + } + + if ob.IsFileDeleted || ob.IsUserDeleted { + // Update the object_copies to mark this object as not requiring further + // replication. The row in object_copies will get deleted when the next + // scheduled object deletion runs. 
+		err = c.ObjectCopiesRepo.UnmarkFromReplication(tx, objectKey)
+		if err != nil {
+			return commit(stacktrace.Propagate(err, "Failed to mark an object not requiring further replication"))
+		}
+		logger.Infof("Skipping replication for deleted object (isFileDeleted = %v, isUserDeleted = %v)",
+			ob.IsFileDeleted, ob.IsUserDeleted)
+		return commit(nil)
+	}
+
+	err = ensureSufficientSpace(ob.Size)
+	if err != nil {
+		// We don't have free space right now, maybe because other big files are
+		// being downloaded simultaneously, but we might get space later, so mark
+		// a failed attempt that'll get retried later.
+		//
+		// Log this error though, so that it gets noticed if it happens too
+		// frequently (the instance might need a bigger disk).
+		return commit(stacktrace.Propagate(err, ""))
+	}
+
+	filePath, file, err := c.createTemporaryFile(objectKey)
+	if err != nil {
+		return commit(stacktrace.Propagate(err, "Failed to create temporary file"))
+	}
+	defer os.Remove(filePath)
+	defer file.Close()
+
+	size, err := c.downloadFromB2ViaWorker(objectKey, file, logger)
+	if err != nil {
+		return commit(stacktrace.Propagate(err, "Failed to download object from B2"))
+	}
+	logger.Infof("Downloaded %d bytes to %s", size, filePath)
+
+	in := &UploadInput{
+		File:         file,
+		ObjectKey:    objectKey,
+		ExpectedSize: size,
+		Logger:       logger,
+	}
+
+	err = nil
+
+	if copies.WantWasabi && copies.Wasabi == nil {
+		werr := c.replicateFile(in, c.wasabiDest, func() error {
+			return c.ObjectCopiesRepo.MarkObjectReplicatedWasabi(tx, objectKey)
+		})
+		err = werr
+	}
+
+	if copies.WantSCW && copies.SCW == nil {
+		serr := c.replicateFile(in, c.scwDest, func() error {
+			return c.ObjectCopiesRepo.MarkObjectReplicatedScaleway(tx, objectKey)
+		})
+		if err == nil {
+			err = serr
+		}
+	}
+
+	return commit(err)
+}
+
+// Return an error if we risk running out of disk space if we try to download
+// and write a file of the given size.
+//
+// This function keeps a buffer of 2 GB free space in its calculations.
+func ensureSufficientSpace(size int64) error {
+	free, err := file.FreeSpace("/")
+	if err != nil {
+		return stacktrace.Propagate(err, "Failed to fetch free space")
+	}
+
+	gb := uint64(1024) * 1024 * 1024
+	need := uint64(size) + (2 * gb)
+	if free < need {
+		return fmt.Errorf("insufficient space on disk (need %d bytes, free %d bytes)", need, free)
+	}
+
+	return nil
+}
+
+// Create a temporary file for storing objectKey. Return both the path to the
+// file, and the handle to the file.
+//
+// The caller must Close() the returned file if it is not nil.
+func (c *ReplicationController3) createTemporaryFile(objectKey string) (string, *os.File, error) {
+	fileName := strings.ReplaceAll(objectKey, "/", "_")
+	filePath := c.tempStorage + "/" + fileName
+	f, err := os.Create(filePath)
+	if err != nil {
+		return "", nil, stacktrace.Propagate(err, "Could not create temporary file at '%s' to download object", filePath)
+	}
+	return filePath, f, nil
+}
+
+// Download the object for objectKey from B2 hot storage, writing it into file.
+//
+// Return the size of the downloaded file.
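+//
+// When running against local buckets, the Cloudflare worker is bypassed and the presigned URL is fetched directly.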
+func (c *ReplicationController3) downloadFromB2ViaWorker(objectKey string, file *os.File, logger *log.Entry) (int64, error) { + presignedURL, err := c.getPresignedB2URL(objectKey) + if err != nil { + return 0, stacktrace.Propagate(err, "Could not create create presigned URL for downloading object") + } + + presignedEncodedURL := base64.StdEncoding.EncodeToString([]byte(presignedURL)) + + client := &http.Client{} + + request, err := http.NewRequest("GET", c.workerURL, nil) + if err != nil { + return 0, stacktrace.Propagate(err, "Could not create request for worker %s", c.workerURL) + } + + q := request.URL.Query() + q.Add("src", presignedEncodedURL) + request.URL.RawQuery = q.Encode() + + if c.S3Config.AreLocalBuckets() { + originalURL := request.URL + request, err = http.NewRequest("GET", presignedURL, nil) + if err != nil { + return 0, stacktrace.Propagate(err, "Could not create request for URL %s", presignedURL) + } + logger.Infof("Bypassing workerURL %s and instead directly GETting %s", originalURL, presignedURL) + } + + response, err := client.Do(request) + if err != nil { + return 0, stacktrace.Propagate(err, "Call to CF worker failed for object %s", objectKey) + } + defer response.Body.Close() + + if response.StatusCode != http.StatusOK { + if response.StatusCode == http.StatusNotFound { + c.notifyDiscord("🔥 Could not find object in HotStorage: " + objectKey) + } + err = fmt.Errorf("CF Worker GET for object %s failed with HTTP status %s", objectKey, response.Status) + return 0, stacktrace.Propagate(err, "") + } + + n, err := io.Copy(file, response.Body) + if err != nil { + return 0, stacktrace.Propagate(err, "Failed to write HTTP response to file") + } + + return n, nil +} + +// Get a presigned URL to download the object with objectKey from the B2 bucket. +func (c *ReplicationController3) getPresignedB2URL(objectKey string) (string, error) { + r, _ := c.b2Client.GetObjectRequest(&s3.GetObjectInput{ + Bucket: c.b2Bucket, + Key: &objectKey, + }) + return r.Presign(PreSignedRequestValidityDuration) +} + +func (c *ReplicationController3) notifyDiscord(message string) { + c.DiscordController.Notify(message) +} + +type UploadInput struct { + File *os.File + ObjectKey string + ExpectedSize int64 + Logger *log.Entry +} + +// Upload, verify and then update the DB to mark replication to dest. +func (c *ReplicationController3) replicateFile(in *UploadInput, dest *UploadDestination, dbUpdateCopies func() error) error { + logger := in.Logger.WithFields(log.Fields{ + "destination": dest.Label, + "bucket": *dest.Bucket, + }) + + failure := func(err error) error { + c.mUploadFailure.WithLabelValues(dest.Label).Inc() + logger.Error(err) + return err + } + + err := c.uploadFile(in, dest) + if err != nil { + return failure(stacktrace.Propagate(err, "Failed to upload object")) + } + + err = c.verifyUploadedFileSize(in, dest) + if err != nil { + return failure(stacktrace.Propagate(err, "Failed to verify upload")) + } + + // The update of the object_keys is not done in the transaction where the + // other updates to object_copies table are made. This is so that the + // object_keys table (which is what'll be used to delete objects) is + // (almost) always updated if the file gets uploaded successfully. + // + // The only time the update wouldn't happen is if museum gets restarted + // between the successful completion of the upload to the bucket and this + // query getting executed. 
+	//
+	// While possible, that is a much smaller window as compared to the
+	// transaction for updating object_copies, which could easily span minutes
+	// as the transaction ends only after the object has been uploaded to all
+	// replicas.
+	rowsAffected, err := c.ObjectRepo.MarkObjectReplicated(in.ObjectKey, dest.DC)
+	if err != nil {
+		return failure(stacktrace.Propagate(err, "Failed to update object_keys to mark replication as completed"))
+	}
+
+	if rowsAffected != 1 {
+		// It is possible that this row was updated earlier, after an upload
+		// that got completed but before the object_copies table could be
+		// updated in the transaction (see "Reuploads").
+		//
+		// So do not treat this as an error.
+		logger.Warnf("Expected 1 row to be updated, but got %d", rowsAffected)
+	}
+
+	err = dbUpdateCopies()
+	if err != nil {
+		return failure(stacktrace.Propagate(err, "Failed to update object_copies to mark replication as complete"))
+	}
+
+	c.mUploadSuccess.WithLabelValues(dest.Label).Inc()
+	return nil
+}
+
+// Upload the given file, using the destination's uploader, to the given bucket.
+//
+// # Reuploads
+//
+// It is possible that the object might already exist on remote. The known
+// scenario where this might happen is if museum gets restarted after having
+// completed the upload but before it got around to modifying the DB.
+//
+// The behaviour in this case is remote-dependent.
+//
+// - Uploading an object with the same key on Scaleway would work normally.
+//
+// - But trying to add an object with the same key on the compliance locked
+// Wasabi would return an HTTP 403.
+//
+// We intercept the Wasabi 403 in this case and move ahead. The subsequent
+// object verification using the HEAD request will act as a sanity check for
+// the object.
+func (c *ReplicationController3) uploadFile(in *UploadInput, dest *UploadDestination) error {
+	// Rewind the file pointer back to the start for the next upload.
+	in.File.Seek(0, io.SeekStart)
+
+	up := s3manager.UploadInput{
+		Bucket: dest.Bucket,
+		Key:    &in.ObjectKey,
+		Body:   in.File,
+	}
+	if dest.IsGlacier {
+		up.StorageClass = aws.String(s3.ObjectStorageClassGlacier)
+	}
+
+	result, err := dest.Uploader.Upload(&up)
+	if err != nil && dest.HasComplianceHold && c.isRequestFailureAccessDenied(err) {
+		in.Logger.Infof("Ignoring object that already exists on remote (we'll verify it using a HEAD check): %s", err)
+		return nil
+	}
+	if err != nil {
+		return stacktrace.Propagate(err, "Upload to bucket %s failed", *dest.Bucket)
+	}
+
+	in.Logger.Infof("Uploaded to bucket %s: %s", *dest.Bucket, result.Location)
+
+	return nil
+}
+
+// Return true if the given error is because of an HTTP 403.
+//
+// See "Reuploads" for the scenario where these errors can arise.
+//
+// Specifically, this is an example of the HTTP 403 response we get when
+// trying to add an object to a Wasabi bucket that already has a compliance
+// locked object with the same key.
+// +// HTTP/1.1 403 Forbidden +// Content-Type: application/xml +// Date: Tue, 20 Dec 2022 10:23:33 GMT +// Server: WasabiS3/7.10.1193-2022-11-23-84c72037e8 (head2) +// +// +// +// AccessDenied +// Access Denied +// yyy +// zzz +// +// +// Printing the error type and details produces this: +// +// type: *s3err.RequestFailure +// AccessDenied: Access Denied +// status code: 403, request id: yyy, host id: zzz +func (c *ReplicationController3) isRequestFailureAccessDenied(err error) bool { + if reqerr, ok := err.(s3.RequestFailure); ok { + if reqerr.Code() == "AccessDenied" { + return true + } + } + return false +} + +// Verify the uploaded file by doing a HEAD check and comparing sizes +func (c *ReplicationController3) verifyUploadedFileSize(in *UploadInput, dest *UploadDestination) error { + res, err := dest.Client.HeadObject(&s3.HeadObjectInput{ + Bucket: dest.Bucket, + Key: &in.ObjectKey, + }) + if err != nil { + return stacktrace.Propagate(err, "Fetching object info from bucket %s failed", *dest.Bucket) + } + + if *res.ContentLength != in.ExpectedSize { + err = fmt.Errorf("size of the uploaded file (%d) does not match the expected size (%d) in bucket %s", + *res.ContentLength, in.ExpectedSize, *dest.Bucket) + c.notifyDiscord(fmt.Sprint(err)) + return stacktrace.Propagate(err, "") + } + + return nil +} diff --git a/server/pkg/controller/storagebonus/bonus.go b/server/pkg/controller/storagebonus/bonus.go new file mode 100644 index 000000000..4c07eefc5 --- /dev/null +++ b/server/pkg/controller/storagebonus/bonus.go @@ -0,0 +1,59 @@ +package storagebonus + +import ( + "context" + + entity "github.com/ente-io/museum/ente/storagebonus" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" +) + +// GetActiveReferralBonusValue returns the storage bonus value for the user +func (c *Controller) GetActiveReferralBonusValue(ctx context.Context, userID int64) (*int64, error) { + return c.StorageBonus.ActiveStorageSurplusOfType(ctx, userID, []entity.BonusType{entity.Referral, entity.SignUp}) +} + +// GetStorageBonusDetailResponse returns the storage bonus detail for the user +func (c *Controller) GetStorageBonusDetailResponse(ctx *gin.Context, userID int64) (*entity.GetStorageBonusDetailResponse, error) { + + user, err := c.UserRepo.Get(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get user") + } + bonusUserID := userID + if user.FamilyAdminID != nil { + bonusUserID = *user.FamilyAdminID + logrus.Info("sharing bonus details of family admin") + } + storageBonuses, err := c.StorageBonus.GetStorageBonuses(ctx, bonusUserID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + refStats, err := c.StorageBonus.GetUserReferralStats(ctx, bonusUserID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + hasAppliedCode := false + // check if storage bonus has type SIGNUP and if it is, set hasAppliedCode to true + for _, bonus := range storageBonuses { + if bonus.Type == entity.SignUp { + hasAppliedCode = true + break + } + } + totalReferralCount := 0 + totalReferralUpgradeCount := 0 + for _, stat := range refStats { + totalReferralCount += stat.TotalCount + totalReferralUpgradeCount += stat.UpgradedCount + } + return &entity.GetStorageBonusDetailResponse{ + Bonuses: storageBonuses, + ReferralStats: refStats, + HasAppliedCode: hasAppliedCode, + RefCount: totalReferralCount, + RefUpgradeCount: totalReferralUpgradeCount, + }, nil + +} diff --git a/server/pkg/controller/storagebonus/referral.go 
b/server/pkg/controller/storagebonus/referral.go new file mode 100644 index 000000000..b452484f4 --- /dev/null +++ b/server/pkg/controller/storagebonus/referral.go @@ -0,0 +1,160 @@ +package storagebonus + +import ( + "database/sql" + "errors" + "fmt" + + "github.com/ente-io/museum/ente" + entity "github.com/ente-io/museum/ente/storagebonus" + "github.com/ente-io/museum/pkg/controller/email" + "github.com/ente-io/museum/pkg/controller/lock" + + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/storagebonus" + "github.com/ente-io/museum/pkg/utils/auth" + enteTime "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" +) + +const ( + codeLength = 6 + referralAmountInGb = 10 + maxClaimableReferralAmount = 2000 + numOfDaysToClaimReferral = 32 + defaultPlanType = entity.TenGbOnUpgrade +) + +// Controller exposes functions to interact with family module +type Controller struct { + UserRepo *repo.UserRepository + StorageBonus *storagebonus.Repository + LockController *lock.LockController + CronRunning bool + EmailNotificationController *email.EmailNotificationController +} + +func (c *Controller) GetUserReferralView(ctx *gin.Context) (*entity.GetUserReferralView, error) { + // Get the user id from the context + userID := auth.GetUserID(ctx.Request.Header) + + // Use goroutines to fetch UserRepo.Get, HasAppliedReferral + user, err := c.UserRepo.Get(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get user") + } + appliedReferral, err := c.StorageBonus.HasAppliedReferral(ctx, userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + isFamilyMember := user.FamilyAdminID != nil && *user.FamilyAdminID != userID + enableApplyCode := !appliedReferral && user.CreationTime > enteTime.MicrosecondBeforeDays(numOfDaysToClaimReferral) && !isFamilyMember + // Get the referral code for the user or family admin + codeUser := userID + if isFamilyMember { + codeUser = *user.FamilyAdminID + } + referralCode, err2 := c.GetOrCreateReferralCode(ctx, codeUser) + if err2 != nil { + return nil, stacktrace.Propagate(err2, "failed to get or create referral code") + } + storageClaimed, err2 := c.GetActiveReferralBonusValue(ctx, codeUser) + if err2 != nil { + return nil, stacktrace.Propagate(err2, "failed to get storage claimed") + } + + return &entity.GetUserReferralView{ + PlanInfo: entity.PlanInfo{ + IsEnabled: true, + PlanType: defaultPlanType, + StorageInGB: referralAmountInGb, + MaxClaimableStorageInGB: maxClaimableReferralAmount, + }, + Code: referralCode, + EnableApplyCode: enableApplyCode, + IsFamilyMember: isFamilyMember, + HasAppliedCode: appliedReferral, + ClaimedStorage: *storageClaimed, + }, nil +} + +func (c *Controller) ApplyReferralCode(ctx *gin.Context, code string) error { + // Get user id from the context + userID := auth.GetUserID(ctx.Request.Header) + user, err := c.UserRepo.Get(userID) + if err != nil { + return stacktrace.Propagate(err, "failed to get user") + } + + codeOwnerID, err := c.StorageBonus.GetUserIDByCode(ctx, code) + if err != nil { + return stacktrace.Propagate(err, "failed to get user id by code") + } + // Verify that the codeOwnerID is not deleted yet + _, err = c.UserRepo.Get(*codeOwnerID) + if err != nil { + if errors.Is(err, ente.ErrUserDeleted) { + return stacktrace.Propagate(entity.InvalidCodeErr, "code belongs to deleted user") + } + return stacktrace.Propagate(err, "failed to get user") + } + + if user.CreationTime < 
enteTime.MicrosecondBeforeDays(numOfDaysToClaimReferral) {
+		return stacktrace.Propagate(entity.CanNotApplyCodeErr, "account is too old to apply code")
+	} else if user.FamilyAdminID != nil && userID != *user.FamilyAdminID {
+		return stacktrace.Propagate(entity.CanNotApplyCodeErr, "user is member of a family plan")
+	}
+
+	err = c.StorageBonus.TrackReferralAndInviteeBonus(ctx, userID, *codeOwnerID, defaultPlanType)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to apply code")
+	}
+	return nil
+}
+
+func (c *Controller) GetOrCreateReferralCode(ctx *gin.Context, userID int64) (*string, error) {
+	referralCode, err := c.StorageBonus.GetCode(ctx, userID)
+	if err != nil {
+		if !errors.Is(err, sql.ErrNoRows) {
+			return nil, stacktrace.Propagate(err, "failed to get storagebonus code")
+		}
+		code, err := generateAlphaNumString(codeLength)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+		err = c.StorageBonus.InsertCode(ctx, userID, code)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to insert storagebonus code")
+		}
+		referralCode = &code
+	}
+	return referralCode, nil
+}
+
+// generateAlphaNumString returns an alphanumeric code of the given length.
+// To avoid ambiguity, the digit 0 and the letter O are excluded. The code
+// always starts with a letter.
+func generateAlphaNumString(length int) (string, error) {
+	// The letters to be used in the string (O is excluded).
+	alphabet := "ABCDEFGHIJKLMNPQRSTUVWXYZ"
+	// The letters plus the digits 1-9 (0 is excluded).
+	alphaNum := fmt.Sprintf("%s123456789", alphabet)
+	// Allocate a byte slice with the desired length.
+	result := make([]byte, length)
+	// Generate the first character as a letter.
+	r0, err := auth.GenerateRandomInt(int64(len(alphabet)))
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	result[0] = alphabet[r0]
+	// Generate the remaining characters as alphanumeric.
+	for i := 1; i < length; i++ {
+		ri, err := auth.GenerateRandomInt(int64(len(alphaNum)))
+		if err != nil {
+			return "", stacktrace.Propagate(err, "")
+		}
+		result[i] = alphaNum[ri]
+	}
+	return string(result), nil
+}
diff --git a/server/pkg/controller/storagebonus/referral_cron.go b/server/pkg/controller/storagebonus/referral_cron.go
new file mode 100644
index 000000000..8b990bfed
--- /dev/null
+++ b/server/pkg/controller/storagebonus/referral_cron.go
@@ -0,0 +1,58 @@
+package storagebonus
+
+import (
+	"context"
+
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/sirupsen/logrus"
+)
+
+// PaymentUpgradeOrDowngradeCron returns early if CronRunning is already true;
+// otherwise it acquires a lock via the lock controller, sets CronRunning to
+// true, runs the cron, and finally releases the lock and resets CronRunning.
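+//
+// The in-memory flag and the DB-backed lock are complementary: CronRunning
+// prevents re-entry within this process, while the lock controller prevents
+// overlapping runs across replicas. A minimal sketch of the guard in
+// isolation (variable names are illustrative):
+//
+//	if c.CronRunning {
+//		return
+//	}
+//	if !c.LockController.TryLock(cronName, expiryTime) {
+//		return
+//	}
+//	c.CronRunning = true
+//	defer func() {
+//		c.LockController.ReleaseLock(cronName)
+//		c.CronRunning = false
+//	}()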
+func (c *Controller) PaymentUpgradeOrDowngradeCron() {
+	cronName := "payment_upgrade_or_downgrade"
+	logger := logrus.WithField("cron", cronName)
+	ctx := context.Background()
+	if c.CronRunning {
+		return
+	}
+	if !c.LockController.TryLock(cronName, time.MicrosecondsAfterMinutes(10)) {
+		return
+	}
+	c.CronRunning = true
+	defer func() {
+		c.LockController.ReleaseLock(cronName)
+		c.CronRunning = false
+	}()
+	bonusCandidate, err := c.StorageBonus.GetReferredForUpgradeBonus(ctx)
+	if err != nil {
+		logger.WithError(err).Error("failed to GetReferredForUpgradeBonus")
+		return
+	}
+	for _, trackingEntry := range bonusCandidate {
+		ctxField := logrus.Fields{
+			"invitee": trackingEntry.Invitee,
+			"invitor": trackingEntry.Invitor,
+			"plan":    trackingEntry.PlanType,
+			"action":  "upgrade_bonus",
+		}
+		logger.WithFields(ctxField).Info("processing referral upgrade")
+		upgradeErr := c.StorageBonus.TrackUpgradeAndInvitorBonus(ctx, trackingEntry.Invitee, trackingEntry.Invitor, trackingEntry.PlanType)
+		if upgradeErr != nil {
+			logger.WithError(upgradeErr).WithFields(ctxField).Error("failed to track upgrade and invitor bonus")
+		} else {
+			c.EmailNotificationController.OnSuccessfulReferral(trackingEntry.Invitor)
+		}
+	}
+
+	bonusPenaltyCandidates, err := c.StorageBonus.GetReferredForDowngradePenalty(ctx)
+	if err != nil {
+		logger.WithError(err).Error("failed to GetReferredForDowngradePenalty")
+		return
+	}
+	if len(bonusPenaltyCandidates) > 0 {
+		logger.WithField("count", len(bonusPenaltyCandidates)).Error("candidates found for downgrade penalty")
+		// todo: implement downgrade penalty
+	}
+}
diff --git a/server/pkg/controller/stripe.go b/server/pkg/controller/stripe.go
new file mode 100644
index 000000000..dc9f57eaf
--- /dev/null
+++ b/server/pkg/controller/stripe.go
@@ -0,0 +1,705 @@
+package controller
+
+import (
+	"context"
+	"database/sql"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"strconv"
+
+	"github.com/ente-io/museum/pkg/controller/commonbilling"
+	"github.com/ente-io/museum/pkg/controller/discord"
+	"github.com/ente-io/museum/pkg/controller/offer"
+	"github.com/ente-io/museum/pkg/repo/storagebonus"
+
+	"github.com/ente-io/museum/ente"
+	emailCtrl "github.com/ente-io/museum/pkg/controller/email"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/museum/pkg/utils/billing"
+	"github.com/ente-io/museum/pkg/utils/email"
+	"github.com/ente-io/stacktrace"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+	"github.com/stripe/stripe-go/v72"
+	"github.com/stripe/stripe-go/v72/client"
+	"github.com/stripe/stripe-go/v72/invoice"
+	"github.com/stripe/stripe-go/v72/webhook"
+	"golang.org/x/text/currency"
+)
+
+// StripeController provides abstractions for handling billing on Stripe
+type StripeController struct {
+	StripeClients          ente.StripeClientPerAccount
+	BillingPlansPerAccount ente.BillingPlansPerAccount
+	BillingRepo            *repo.BillingRepository
+	FileRepo               *repo.FileRepository
+	UserRepo               *repo.UserRepository
+	StorageBonusRepo       *storagebonus.Repository
+	DiscordController      *discord.DiscordController
+	EmailNotificationCtrl  *emailCtrl.EmailNotificationController
+	OfferController        *offer.OfferController
+	CommonBillCtrl         *commonbilling.Controller
+}
+
+// A flag we set on Stripe subscriptions to indicate that we should skip
+// sending out the email when the subscription has been cancelled.
+//
+// This is needed e.g. if this cancellation was as part of a user initiated
+// account deletion.
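+//
+// A minimal sketch of how the flag is attached before cancellation (this is
+// what cancelExistingStripeSubscription and CancelSubAndDeleteCustomer below
+// do with the stripe-go client; the subscriptionID variable is illustrative):
+//
+//	params := &stripe.SubscriptionParams{}
+//	params.AddMetadata(SkipMailKey, "true")
+//	_, err := client.Subscriptions.Update(subscriptionID, params)
+//
+// handleCustomerSubscriptionDeleted then reads
+// stripeSubscription.Metadata[SkipMailKey] before deciding to email.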
+const SkipMailKey = "skip_mail" + +// Return a new instance of StripeController +func NewStripeController(plans ente.BillingPlansPerAccount, stripeClients ente.StripeClientPerAccount, billingRepo *repo.BillingRepository, fileRepo *repo.FileRepository, userRepo *repo.UserRepository, storageBonusRepo *storagebonus.Repository, discordController *discord.DiscordController, emailNotificationController *emailCtrl.EmailNotificationController, offerController *offer.OfferController, commonBillCtrl *commonbilling.Controller) *StripeController { + return &StripeController{ + StripeClients: stripeClients, + BillingRepo: billingRepo, + FileRepo: fileRepo, + UserRepo: userRepo, + BillingPlansPerAccount: plans, + StorageBonusRepo: storageBonusRepo, + DiscordController: discordController, + EmailNotificationCtrl: emailNotificationController, + OfferController: offerController, + CommonBillCtrl: commonBillCtrl, + } +} + +// GetCheckoutSession handles the creation of stripe checkout session for subscription purchase +func (c *StripeController) GetCheckoutSession(productID string, userID int64, redirectRootURL string) (string, error) { + if productID == "" { + return "", stacktrace.Propagate(ente.ErrBadRequest, "") + } + subscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + // error sql.ErrNoRows not possible as user must at least have a free subscription + return "", stacktrace.Propagate(err, "") + } + hasActivePaidSubscription := billing.IsActivePaidPlan(subscription) + hasStripeSubscription := subscription.PaymentProvider == ente.Stripe + if hasActivePaidSubscription { + if hasStripeSubscription { + return "", stacktrace.Propagate(ente.ErrBadRequest, "") + } else if !subscription.Attributes.IsCancelled { + return "", stacktrace.Propagate(ente.ErrBadRequest, "") + } + } + if subscription.PaymentProvider == ente.Stripe && !subscription.Attributes.IsCancelled { + // user had bought a stripe subscription earlier, + err := c.cancelExistingStripeSubscription(subscription, userID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + } + stripeSuccessURL := redirectRootURL + viper.GetString("stripe.path.success") + stripeCancelURL := redirectRootURL + viper.GetString("stripe.path.cancel") + allowPromotionCodes := true + params := &stripe.CheckoutSessionParams{ + ClientReferenceID: stripe.String(strconv.FormatInt(userID, 10)), + SuccessURL: stripe.String(stripeSuccessURL), + CancelURL: stripe.String(stripeCancelURL), + Mode: stripe.String(string(stripe.CheckoutSessionModeSubscription)), + LineItems: []*stripe.CheckoutSessionLineItemParams{ + { + Price: stripe.String(productID), + Quantity: stripe.Int64(1), + }, + }, + AllowPromotionCodes: &allowPromotionCodes, + } + var stripeClient *client.API + if subscription.PaymentProvider == ente.Stripe { + stripeClient = c.StripeClients[subscription.Attributes.StripeAccountCountry] + // attach the subscription to existing customerID + params.Customer = stripe.String(subscription.Attributes.CustomerID) + } else { + stripeClient = c.StripeClients[ente.DefaultStripeAccountCountry] + user, err := c.UserRepo.Get(userID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + // attach user's emailID to the checkout session and subsequent subscription bought + params.CustomerEmail = stripe.String(user.Email) + } + + s, err := stripeClient.CheckoutSessions.New(params) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + return s.ID, nil +} + +// GetVerifiedSubscription verifies and returns the verified 
subscription +func (c *StripeController) GetVerifiedSubscription(userID int64, sessionID string) (ente.Subscription, error) { + var stripeSubscription stripe.Subscription + var err error + if sessionID != "" { + log.Info("Received session ID: " + sessionID) + // Get verified subscription request was received from success redirect page + stripeSubscription, err = c.getStripeSubscriptionFromSession(userID, sessionID) + } else { + log.Info("Did not receive a session ID") + // Get verified subscription request for a subscription update + stripeSubscription, err = c.getUserStripeSubscription(userID) + } + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + log.Info("Received stripe subscription with ID: " + stripeSubscription.ID) + subscription, err := c.getEnteSubscriptionFromStripeSubscription(userID, stripeSubscription) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + log.Info("Returning ente subscription with ID: " + strconv.FormatInt(subscription.ID, 10)) + return subscription, nil +} + +func (c *StripeController) HandleUSNotification(payload []byte, header string) error { + event, err := webhook.ConstructEvent(payload, header, viper.GetString("stripe.us.webhook-secret")) + if err != nil { + return stacktrace.Propagate(err, "") + } + return c.handleWebhookEvent(event) +} + +func (c *StripeController) HandleINNotification(payload []byte, header string) error { + event, err := webhook.ConstructEvent(payload, header, viper.GetString("stripe.in.webhook-secret")) + if err != nil { + return stacktrace.Propagate(err, "") + } + return c.handleWebhookEvent(event) +} + +func (c *StripeController) handleWebhookEvent(event stripe.Event) error { + // The event body would already have been logged by the upper layers by the + // time we get here, so we can only handle the events that we care about. In + // case we receive an unexpected event, we do log an error though. + handler := c.findHandlerForEvent(event) + if handler == nil { + log.Error("Received an unexpected webhook from stripe:", event.Type) + return nil + } + eventLog, err := handler(event) + if err != nil { + return stacktrace.Propagate(err, "") + } + if eventLog.UserID == 0 { + // Do not try to log if we do not have an associated user. This can + // happen, e.g. with out of order webhooks. + // Or in case of offer application, where events are logged by the Storage Bonus Repo + // + // See: Ignore webhooks received before user has been created + return nil + } + err = c.BillingRepo.LogStripePush(eventLog) + return stacktrace.Propagate(err, "") +} + +func (c *StripeController) findHandlerForEvent(event stripe.Event) func(event stripe.Event) (ente.StripeEventLog, error) { + switch event.Type { + case "checkout.session.completed": + return c.handleCheckoutSessionCompleted + case "customer.subscription.deleted": + return c.handleCustomerSubscriptionDeleted + case "customer.subscription.updated": + return c.handleCustomerSubscriptionUpdated + case "invoice.paid": + return c.handleInvoicePaid + default: + return nil + } +} + +// Payment is successful and the subscription is created. +// You should provision the subscription. 
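+//
+// The userID set as ClientReferenceID in GetCheckoutSession round-trips
+// through Stripe and comes back in this event, which is what lets us map the
+// checkout session back to an ente account. In sketch form:
+//
+//	// on the way out (GetCheckoutSession):
+//	params.ClientReferenceID = stripe.String(strconv.FormatInt(userID, 10))
+//	// ... and on the way back in, below:
+//	userID, _ := strconv.ParseInt(session.ClientReferenceID, 10, 64)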
+func (c *StripeController) handleCheckoutSessionCompleted(event stripe.Event) (ente.StripeEventLog, error) { + var session stripe.CheckoutSession + json.Unmarshal(event.Data.Raw, &session) + if session.ClientReferenceID != "" { // via payments.ente.io, where we inserted the userID + userID, _ := strconv.ParseInt(session.ClientReferenceID, 10, 64) + newSubscription, err := c.GetVerifiedSubscription(userID, session.ID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + stripeSubscription, err := c.getStripeSubscriptionFromSession(userID, session.ID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + currentSubscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + if currentSubscription.ExpiryTime >= newSubscription.ExpiryTime && + currentSubscription.ProductID != ente.FreePlanProductID { + log.Warn("Webhook is reporting an outdated purchase that was already verified stripeSubscription:", stripeSubscription.ID) + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil + } + err = c.BillingRepo.ReplaceSubscription( + currentSubscription.ID, + newSubscription, + ) + isUpgradingFromFreePlan := currentSubscription.ProductID == ente.FreePlanProductID + if isUpgradingFromFreePlan { + go func() { + cur := currency.MustParseISO(string(session.Currency)) + amount := fmt.Sprintf("%v%v", currency.Symbol(cur), float64(session.AmountTotal)/float64(100)) + c.DiscordController.NotifyNewSub(userID, "stripe", amount) + }() + go func() { + c.EmailNotificationCtrl.OnAccountUpgrade(userID) + }() + } + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil + } else { + priceID, err := c.getPriceIDFromSession(session.ID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + email := session.CustomerDetails.Email + err = c.OfferController.ApplyOffer(email, priceID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + } + return ente.StripeEventLog{}, nil +} + +// Occurs whenever a customer's subscription ends. +func (c *StripeController) handleCustomerSubscriptionDeleted(event stripe.Event) (ente.StripeEventLog, error) { + var stripeSubscription stripe.Subscription + json.Unmarshal(event.Data.Raw, &stripeSubscription) + currentSubscription, err := c.BillingRepo.GetSubscriptionForTransaction(stripeSubscription.ID, ente.Stripe) + if err != nil { + // Ignore webhooks received before user has been created + // + // This would happen when we get webhook events out of order, e.g. we + // get a "customer.subscription.updated" before + // "checkout.session.completed", and the customer has not yet been + // created in our database. 
+ if errors.Is(err, sql.ErrNoRows) { + log.Warn("Webhook is reporting an event for un-verified subscription stripeSubscriptionID:", stripeSubscription.ID) + return ente.StripeEventLog{}, nil + } + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + userID := currentSubscription.UserID + user, err := c.UserRepo.Get(userID) + if err != nil { + if errors.Is(err, ente.ErrUserDeleted) { + // no-op user has already been deleted + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil + } + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + + skipMail := stripeSubscription.Metadata[SkipMailKey] + // Send a cancellation notification email for folks who are either on + // individual plan or admin of a family plan. + if skipMail != "true" && + (user.FamilyAdminID == nil || *user.FamilyAdminID == userID) { + storage, surpErr := c.StorageBonusRepo.GetPaidAddonSurplusStorage(context.Background(), userID) + if surpErr != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(surpErr, "") + } + if storage == nil || *storage <= 0 { + err = email.SendTemplatedEmail([]string{user.Email}, "ente", "support@ente.io", + ente.SubscriptionEndedEmailSubject, ente.SubscriptionEndedEmailTemplate, + map[string]interface{}{}, nil) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + } else { + log.WithField("storage", storage).Info("User has surplus storage, not sending email") + } + } + // TODO: Add cron to delete files of users with expired subscriptions + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil +} + +// Occurs whenever a subscription changes (e.g., switching from one plan to +// another, or changing the status from trial to active). 
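+//
+// Of the possible statuses, only two are acted upon below: past_due sends the
+// account-on-hold email, and active replaces the stored plan when the Stripe
+// price has changed; everything else is just logged by the upper layers.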
+func (c *StripeController) handleCustomerSubscriptionUpdated(event stripe.Event) (ente.StripeEventLog, error) { + var stripeSubscription stripe.Subscription + json.Unmarshal(event.Data.Raw, &stripeSubscription) + currentSubscription, err := c.BillingRepo.GetSubscriptionForTransaction(stripeSubscription.ID, ente.Stripe) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // See: Ignore webhooks received before user has been created + log.Warn("Webhook is reporting an event for un-verified subscription stripeSubscriptionID:", stripeSubscription.ID) + return ente.StripeEventLog{}, nil + } + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + + userID := currentSubscription.UserID + switch stripeSubscription.Status { + case stripe.SubscriptionStatusPastDue: + user, err := c.UserRepo.Get(userID) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + err = email.SendTemplatedEmail([]string{user.Email}, "ente", "support@ente.io", + ente.AccountOnHoldEmailSubject, ente.OnHoldTemplate, map[string]interface{}{ + "PaymentProvider": "Stripe", + }, nil) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + case stripe.SubscriptionStatusActive: + newSubscription, err := c.getEnteSubscriptionFromStripeSubscription(userID, stripeSubscription) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + if currentSubscription.ProductID == newSubscription.ProductID { + // Webhook is reporting an outdated update that was already verified + // no-op + log.Warn("Webhook is reporting an outdated purchase that was already verified stripeSubscriptionID:", stripeSubscription.ID) + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil + } + if newSubscription.ProductID != currentSubscription.ProductID { + c.BillingRepo.ReplaceSubscription(currentSubscription.ID, newSubscription) + } + } + return ente.StripeEventLog{UserID: userID, StripeSubscription: stripeSubscription, Event: event}, nil +} + +// Continue to provision the subscription as payments continue to be made. 
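+//
+// Stripe reports CurrentPeriodEnd as a unix timestamp in seconds, while
+// museum stores times in microseconds, hence the *1000*1000 below. A worked
+// example: CurrentPeriodEnd = 1_700_000_000 s becomes an ExpiryTime of
+// 1_700_000_000_000_000 µs.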
+func (c *StripeController) handleInvoicePaid(event stripe.Event) (ente.StripeEventLog, error) { + var invoice stripe.Invoice + json.Unmarshal(event.Data.Raw, &invoice) + stripeSubscriptionID := invoice.Subscription.ID + currentSubscription, err := c.BillingRepo.GetSubscriptionForTransaction(stripeSubscriptionID, ente.Stripe) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + // See: Ignore webhooks received before user has been created + log.Warn("Webhook is reporting an event for un-verified subscription stripeSubscriptionID:", stripeSubscriptionID) + return ente.StripeEventLog{}, nil + } + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + + userID := currentSubscription.UserID + client := c.StripeClients[currentSubscription.Attributes.StripeAccountCountry] + + stripeSubscription, err := client.Subscriptions.Get(stripeSubscriptionID, nil) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + + newExpiryTime := stripeSubscription.CurrentPeriodEnd * 1000 * 1000 + if currentSubscription.ExpiryTime == newExpiryTime { + //outdated invoice + log.Warn("Webhook is reporting an outdated purchase that was already verified stripeSubscriptionID:", stripeSubscription.ID) + return ente.StripeEventLog{UserID: userID, StripeSubscription: *stripeSubscription, Event: event}, nil + } + err = c.BillingRepo.UpdateSubscriptionExpiryTime( + currentSubscription.ID, newExpiryTime) + if err != nil { + return ente.StripeEventLog{}, stacktrace.Propagate(err, "") + } + return ente.StripeEventLog{UserID: userID, StripeSubscription: *stripeSubscription, Event: event}, nil +} + +func (c *StripeController) UpdateSubscription(stripeID string, userID int64) (ente.SubscriptionUpdateResponse, error) { + subscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(err, "") + } + newPlan, newStripeAccountCountry, err := c.getPlanAndAccount(stripeID) + if err != nil { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(err, "") + } + if subscription.PaymentProvider != ente.Stripe || subscription.ProductID == stripeID || subscription.Attributes.StripeAccountCountry != newStripeAccountCountry { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(ente.ErrBadRequest, "") + } + if newPlan.Storage < subscription.Storage { // Downgrade + canDowngrade, canDowngradeErr := c.CommonBillCtrl.CanDowngradeToGivenStorage(newPlan.Storage, userID) + if canDowngradeErr != nil { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(canDowngradeErr, "") + } + if !canDowngrade { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(ente.ErrCannotDowngrade, "") + } + log.Info("Usage is good") + + } + client := c.StripeClients[subscription.Attributes.StripeAccountCountry] + stripeSubscription, err := client.Subscriptions.Get(subscription.OriginalTransactionID, nil) + if err != nil { + return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(err, "") + } + params := stripe.SubscriptionParams{ + ProrationBehavior: stripe.String(string(stripe.SubscriptionProrationBehaviorAlwaysInvoice)), + Items: []*stripe.SubscriptionItemsParams{ + { + ID: stripe.String(stripeSubscription.Items.Data[0].ID), + Price: stripe.String(stripeID), + }, + }, + PaymentBehavior: stripe.String(string(stripe.SubscriptionPaymentBehaviorPendingIfIncomplete)), + } + params.AddExpand("latest_invoice.payment_intent") + newStripeSubscription, err := 
client.Subscriptions.Update(subscription.OriginalTransactionID, &params)
+	if err != nil {
+		stripeError := err.(*stripe.Error)
+		switch stripeError.Type {
+		case stripe.ErrorTypeCard:
+			return ente.SubscriptionUpdateResponse{Status: "requires_payment_method"}, nil
+		default:
+			return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(err, "")
+		}
+	}
+	if newStripeSubscription.PendingUpdate != nil {
+		switch newStripeSubscription.LatestInvoice.PaymentIntent.Status {
+		case stripe.PaymentIntentStatusRequiresAction:
+			return ente.SubscriptionUpdateResponse{Status: "requires_action", ClientSecret: newStripeSubscription.LatestInvoice.PaymentIntent.ClientSecret}, nil
+		case stripe.PaymentIntentStatusRequiresPaymentMethod:
+			inv := newStripeSubscription.LatestInvoice
+			invoice.VoidInvoice(inv.ID, nil)
+			return ente.SubscriptionUpdateResponse{Status: "requires_payment_method"}, nil
+		}
+		return ente.SubscriptionUpdateResponse{}, stacktrace.Propagate(ente.ErrBadRequest, "")
+	}
+	return ente.SubscriptionUpdateResponse{Status: "success"}, nil
+}
+
+func (c *StripeController) UpdateSubscriptionCancellationStatus(userID int64, status bool) (ente.Subscription, error) {
+	subscription, err := c.BillingRepo.GetUserSubscription(userID)
+	if err != nil {
+		// error sql.ErrNoRows not possible as user must at least have a free subscription
+		return ente.Subscription{}, stacktrace.Propagate(err, "")
+	}
+	if subscription.PaymentProvider != ente.Stripe {
+		return ente.Subscription{}, stacktrace.Propagate(ente.ErrBadRequest, "")
+	}
+
+	if subscription.Attributes.IsCancelled == status {
+		// no-op
+		return subscription, nil
+	}
+
+	client := c.StripeClients[subscription.Attributes.StripeAccountCountry]
+	params := &stripe.SubscriptionParams{
+		CancelAtPeriodEnd: stripe.Bool(status),
+	}
+	_, err = client.Subscriptions.Update(subscription.OriginalTransactionID, params)
+	if err != nil {
+		return ente.Subscription{}, stacktrace.Propagate(err, "")
+	}
+	err = c.BillingRepo.UpdateSubscriptionCancellationStatus(userID, status)
+	if err != nil {
+		return ente.Subscription{}, stacktrace.Propagate(err, "")
+	}
+	subscription.Attributes.IsCancelled = status
+	return subscription, nil
+}
+
+func (c *StripeController) GetStripeCustomerPortal(userID int64, redirectRootURL string) (string, error) {
+	subscription, err := c.BillingRepo.GetUserSubscription(userID)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	if subscription.PaymentProvider != ente.Stripe {
+		return "", stacktrace.Propagate(ente.ErrBadRequest, "")
+	}
+	client := c.StripeClients[subscription.Attributes.StripeAccountCountry]
+
+	params := &stripe.BillingPortalSessionParams{
+		Customer:  stripe.String(subscription.Attributes.CustomerID),
+		ReturnURL: stripe.String(redirectRootURL),
+	}
+	ps, err := client.BillingPortalSessions.New(params)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return ps.URL, nil
+}
+
+func (c *StripeController) getStripeSubscriptionFromSession(userID int64, checkoutSessionID string) (stripe.Subscription, error) {
+	subscription, err := c.BillingRepo.GetUserSubscription(userID)
+	if err != nil {
+		return stripe.Subscription{}, stacktrace.Propagate(err, "")
+	}
+	var stripeClient *client.API
+	if subscription.PaymentProvider == ente.Stripe {
+		stripeClient = c.StripeClients[subscription.Attributes.StripeAccountCountry]
+	} else {
+		stripeClient = c.StripeClients[ente.DefaultStripeAccountCountry]
+	}
+	params := &stripe.CheckoutSessionParams{}
+	params.AddExpand("subscription")
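+	// AddExpand makes Stripe inline the full subscription object in the
+	// checkout-session response, so no second API call is needed before
+	// dereferencing checkoutSession.Subscription below.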
checkoutSession, err := stripeClient.CheckoutSessions.Get(checkoutSessionID, params) + if err != nil { + return stripe.Subscription{}, stacktrace.Propagate(err, "") + } + if (*checkoutSession.Subscription).Status != stripe.SubscriptionStatusActive { + return stripe.Subscription{}, stacktrace.Propagate(&stripe.InvalidRequestError{}, "") + } + return *checkoutSession.Subscription, nil +} + +func (c *StripeController) getPriceIDFromSession(sessionID string) (string, error) { + stripeClient := c.StripeClients[ente.DefaultStripeAccountCountry] + params := &stripe.CheckoutSessionListLineItemsParams{} + params.AddExpand("data.price") + items := stripeClient.CheckoutSessions.ListLineItems(sessionID, params) + for items.Next() { // Return the first PriceID that has been fetched + return items.LineItem().Price.ID, nil + } + return "", stacktrace.Propagate(ente.ErrNotFound, "") +} + +func (c *StripeController) getUserStripeSubscription(userID int64) (stripe.Subscription, error) { + subscription, err := c.BillingRepo.GetUserSubscription(userID) + if err != nil { + return stripe.Subscription{}, stacktrace.Propagate(err, "") + } + if subscription.PaymentProvider != ente.Stripe { + return stripe.Subscription{}, stacktrace.Propagate(ente.ErrCannotSwitchPaymentProvider, "") + } + client := c.StripeClients[subscription.Attributes.StripeAccountCountry] + stripeSubscription, err := client.Subscriptions.Get(subscription.OriginalTransactionID, nil) + if err != nil { + return stripe.Subscription{}, stacktrace.Propagate(err, "") + } + return *stripeSubscription, nil +} + +func (c *StripeController) getPlanAndAccount(stripeID string) (ente.BillingPlan, ente.StripeAccountCountry, error) { + for stripeAccountCountry, billingPlansCountryWise := range c.BillingPlansPerAccount { + for _, plans := range billingPlansCountryWise { + for _, plan := range plans { + if plan.StripeID == stripeID { + return plan, stripeAccountCountry, nil + } + } + } + } + return ente.BillingPlan{}, "", stacktrace.Propagate(ente.ErrNotFound, "") +} + +func (c *StripeController) getEnteSubscriptionFromStripeSubscription(userID int64, stripeSubscription stripe.Subscription) (ente.Subscription, error) { + productID := stripeSubscription.Items.Data[0].Price.ID + plan, stripeAccountCountry, err := c.getPlanAndAccount(productID) + if err != nil { + return ente.Subscription{}, stacktrace.Propagate(err, "") + } + s := ente.Subscription{ + UserID: userID, + PaymentProvider: ente.Stripe, + ProductID: productID, + Storage: plan.Storage, + Attributes: ente.SubscriptionAttributes{CustomerID: stripeSubscription.Customer.ID, IsCancelled: false, StripeAccountCountry: stripeAccountCountry}, + OriginalTransactionID: stripeSubscription.ID, + ExpiryTime: stripeSubscription.CurrentPeriodEnd * 1000 * 1000, + } + return s, nil +} + +func (c *StripeController) UpdateBillingEmail(subscription ente.Subscription, newEmail string) error { + params := &stripe.CustomerParams{Email: &newEmail} + client := c.StripeClients[subscription.Attributes.StripeAccountCountry] + _, err := client.Customers.Update( + subscription.Attributes.CustomerID, + params, + ) + if err != nil { + return stacktrace.Propagate(err, "failed to update stripe customer emailID") + } + return nil +} + +func (c *StripeController) CancelSubAndDeleteCustomer(subscription ente.Subscription, logger *log.Entry) error { + client := c.StripeClients[subscription.Attributes.StripeAccountCountry] + if !subscription.Attributes.IsCancelled { + prorateRefund := true + logger.Info("cancelling sub with prorated refund") 
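+		// Two Stripe calls follow: first tag the subscription with the
+		// skip-mail flag so that the deletion webhook does not email a user
+		// whose account is being deleted anyway, then cancel with a prorated
+		// refund.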
+		updateParams := &stripe.SubscriptionParams{}
+		updateParams.AddMetadata(SkipMailKey, "true")
+		_, err := client.Subscriptions.Update(subscription.OriginalTransactionID, updateParams)
+		if err != nil {
+			stripeError := err.(*stripe.Error)
+			errorMsg := fmt.Sprintf("subscription update failed during account deletion: %s, %s", stripeError.Msg, stripeError.Code)
+			log.Error(errorMsg)
+			c.DiscordController.Notify(errorMsg)
+			if stripeError.HTTPStatusCode == http.StatusNotFound {
+				log.Error("Ignoring error since an active subscription could not be found")
+				return nil
+			} else if stripeError.HTTPStatusCode == http.StatusBadRequest {
+				log.Error("Bad request while trying to delete account")
+				return nil
+			}
+			return stacktrace.Propagate(err, "")
+		}
+		_, err = client.Subscriptions.Cancel(subscription.OriginalTransactionID, &stripe.SubscriptionCancelParams{
+			Prorate: &prorateRefund,
+		})
+		if err != nil {
+			stripeError := err.(*stripe.Error)
+			logger.Error(fmt.Sprintf("subscription cancel failed msg= %s for userID=%d", stripeError.Msg, subscription.UserID))
+			// ignore if subscription doesn't exist, already deleted
+			if stripeError.HTTPStatusCode != 404 {
+				return stacktrace.Propagate(err, "")
+			}
+		}
+		err = c.BillingRepo.UpdateSubscriptionCancellationStatus(subscription.UserID, true)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	logger.Info("deleting customer from stripe")
+	_, err := client.Customers.Del(
+		subscription.Attributes.CustomerID,
+		&stripe.CustomerParams{},
+	)
+	if err != nil {
+		stripeError := err.(*stripe.Error)
+		switch stripeError.Type {
+		case stripe.ErrorTypeInvalidRequest:
+			if stripe.ErrorCodeResourceMissing == stripeError.Code {
+				return nil
+			}
+			return stacktrace.Propagate(err, fmt.Sprintf("failed to delete customer %s", subscription.Attributes.CustomerID))
+		default:
+			return stacktrace.Propagate(err, fmt.Sprintf("failed to delete customer %s", subscription.Attributes.CustomerID))
+		}
+	}
+	return nil
+}
+
+// cancelExistingStripeSubscription cancels the user's earlier (e.g. past_due) Stripe subscription,
+// adding the skip-mail metadata entry so that no cancellation email goes out while the user is re-subscribing
+func (c *StripeController) cancelExistingStripeSubscription(subscription ente.Subscription, userID int64) error {
+	updateParams := &stripe.SubscriptionParams{}
+	updateParams.AddMetadata(SkipMailKey, "true")
+	client := c.StripeClients[subscription.Attributes.StripeAccountCountry]
+	_, err := client.Subscriptions.Update(subscription.OriginalTransactionID, updateParams)
+	if err != nil {
+		stripeError := err.(*stripe.Error)
+		log.Warn(fmt.Sprintf("subscription update failed msg= %s for userID=%d", stripeError.Msg, userID))
+		// ignore if subscription doesn't exist, already deleted
+		if stripeError.HTTPStatusCode != 404 {
+			return stacktrace.Propagate(err, "")
+		}
+	} else {
+		_, err = client.Subscriptions.Cancel(subscription.OriginalTransactionID, nil)
+		if err != nil {
+			stripeError := err.(*stripe.Error)
+			log.Warn(fmt.Sprintf("subscription cancel failed msg= %s for userID=%d", stripeError.Msg, userID))
+			// ignore if subscription doesn't exist, already deleted
+			if stripeError.HTTPStatusCode != 404 {
+				return stacktrace.Propagate(err, "")
+			}
+		}
+		err = c.BillingRepo.UpdateSubscriptionCancellationStatus(userID, true)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	return nil
+}
diff --git a/server/pkg/controller/trash.go b/server/pkg/controller/trash.go
new file mode 100644
index 000000000..0b3d242a9
--- /dev/null
+++ b/server/pkg/controller/trash.go
@@ -0,0 +1,326 @@
+package controller
+
+import (
+	"context"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+	"github.com/google/uuid"
+	log "github.com/sirupsen/logrus"
+)
+
+// TrashController has the business logic related to the trash feature
+type TrashController struct {
+	TrashRepo               *repo.TrashRepository
+	FileRepo                *repo.FileRepository
+	CollectionRepo          *repo.CollectionRepository
+	QueueRepo               *repo.QueueRepository
+	TaskLockRepo            *repo.TaskLockRepository
+	HostName                string
+	dropFileMetadataRunning bool
+	collectionTrashRunning  bool
+	emptyTrashRunning       bool
+	// deleteAgedTrashRunning indicates whether the cron that deletes files which have been
+	// in trash for more than repo.TrashDurationInDays is running
+	deleteAgedTrashRunning bool
+}
+
+// GetDiff returns the changes in the user's trash since a timestamp, along with a hasMore bool flag.
+func (t *TrashController) GetDiff(userID int64, sinceTime int64, stripMetadata bool, app ente.App) ([]ente.Trash, bool, error) {
+	trashFilesDiff, hasMore, err := t.getDiff(userID, sinceTime, repo.TrashDiffLimit, app)
+	if err != nil {
+		return nil, false, err
+	}
+	// hide private metadata before returning files info in diff
+	if stripMetadata {
+		for _, trashFile := range trashFilesDiff {
+			if trashFile.IsDeleted {
+				trashFile.File.MagicMetadata = nil
+				trashFile.File.PubicMagicMetadata = nil
+				trashFile.File.Metadata = ente.FileAttributes{}
+				trashFile.File.Info = nil
+			}
+		}
+	}
+	return trashFilesDiff, hasMore, err
+}
+
+// getDiff returns the diff in the user's trash since a timestamp, along with a hasMore bool flag.
+// The function will never return a partial result for a version. To maintain this promise, it may
+// not be able to honor the limit parameter. Based on the db state, compared to the limit, the diff
+// length can be less (case 1), more (case 2), or the same (cases 3 and 4).
+// Example: Assume we have 11 files with the following versions: v0, v1, v1, v1, v1, v1, v1, v1, v2, v2, v2
+// (count = 7 v1, 3 v2), and the client has synced up till version v0.
+// case 1: (sinceTime: v0, limit = 8):
+// The method will discard the entries with version v2 and return only the 7 entries with version v1.
+// case 2: (sinceTime: v0, limit = 5):
+// Instead of returning 5 entries with version v1, the method will return all 7 entries with version v1.
+// case 3: (sinceTime: v0, limit = 7):
+// The method will return all 7 entries with version v1.
+// case 4: (sinceTime: v0, limit >= 10):
+// The method will return all 10 entries in the diff.
+func (t *TrashController) getDiff(userID int64, sinceTime int64, limit int, app ente.App) ([]ente.Trash, bool, error) {
+	// request for limit +1 files
+	diffLimitPlusOne, err := t.TrashRepo.GetDiff(userID, sinceTime, limit+1, app)
+	if err != nil {
+		return nil, false, stacktrace.Propagate(err, "")
+	}
+	if len(diffLimitPlusOne) <= limit {
+		// case 4: all files changed after sinceTime are included.
+ return diffLimitPlusOne, false, nil + } + lastFileVersion := diffLimitPlusOne[limit].UpdatedAt + filteredDiffs := t.removeFilesWithVersion(diffLimitPlusOne, lastFileVersion) + if len(filteredDiffs) > 0 { // case 1 or case 3 + return filteredDiffs, true, nil + } + // case 2 + diff, err := t.TrashRepo.GetFilesWithVersion(userID, lastFileVersion) + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + return diff, true, nil +} + +// Delete files permanently, queues up the file for deletion & free up the space based on file's object size +func (t *TrashController) Delete(ctx context.Context, request ente.DeleteTrashFilesRequest) error { + err := t.TrashRepo.Delete(ctx, request.OwnerID, request.FileIDs) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +func (t *TrashController) EmptyTrash(ctx context.Context, userID int64, req ente.EmptyTrashRequest) error { + err := t.TrashRepo.EmptyTrash(ctx, userID, req.LastUpdatedAt) + if err != nil { + return stacktrace.Propagate(err, "") + } + defer t.ProcessEmptyTrashRequests() + return nil +} + +func (t *TrashController) CleanupTrashedCollections() { + ctxLogger := log.WithFields(log.Fields{ + "flow": "trash_collection", + "id": uuid.New().String(), + }) + item_processed_count := 0 + if t.collectionTrashRunning { + ctxLogger.Info("Already moving collection to trash, skipping cron") + return + } + t.collectionTrashRunning = true + defer func() { + ctxLogger.WithField("items_processed", item_processed_count).Info("cron run finished") + t.collectionTrashRunning = false + }() + + // process delete collection request for DELETE V2 + items, err := t.QueueRepo.GetItemsReadyForDeletion(repo.TrashCollectionQueue, 100) + if err != nil { + log.Error("Could not fetch from collection trash queue", err) + return + } + item_processed_count += len(items) + for _, item := range items { + t.trashCollection(item, repo.TrashCollectionQueue, true, ctxLogger) + } + + // process delete collection request for DELETE V3 + itemsV3, err2 := t.QueueRepo.GetItemsReadyForDeletion(repo.TrashCollectionQueueV3, 100) + if err2 != nil { + log.Error("Could not fetch from collection trash queue", err2) + return + } + item_processed_count += len(itemsV3) + for _, item := range itemsV3 { + t.trashCollection(item, repo.TrashCollectionQueueV3, false, ctxLogger) + } +} + +func (t *TrashController) ProcessEmptyTrashRequests() { + if t.emptyTrashRunning { + log.Info("Already processing empty trash requests, skipping cron") + return + } + t.emptyTrashRunning = true + defer func() { + t.emptyTrashRunning = false + }() + items, err := t.QueueRepo.GetItemsReadyForDeletion(repo.TrashEmptyQueue, 100) + if err != nil { + log.Error("Could not fetch from emptyTrashQueue queue", err) + return + } + for _, item := range items { + t.emptyTrash(item) + } +} + +// DeleteAgedTrashedFiles delete trashed files which are in trash since repo.TrashDurationInDays +func (t *TrashController) DeleteAgedTrashedFiles() { + if t.deleteAgedTrashRunning { + log.Info("Already deleting older trashed files, skipping cron") + return + } + t.deleteAgedTrashRunning = true + defer func() { + t.deleteAgedTrashRunning = false + }() + + lockName := "DeleteAgedTrashedFiles" + lockStatus, err := t.TaskLockRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), t.HostName) + if err != nil || !lockStatus { + log.Error("Unable to acquire lock to DeleteAgedTrashedFiles") + return + } + defer func() { + releaseErr := t.TaskLockRepo.ReleaseLock(lockName) + if releaseErr != nil { + 
log.WithError(releaseErr).Error("Error while releasing aged trash lock")
+		}
+	}()
+
+	userIDToFileMap, err := t.TrashRepo.GetUserIDToFileIDsMapForDeletion()
+	if err != nil {
+		log.Error("Could not fetch trashed files for deletion", err)
+		return
+	}
+
+	for userID, fileIDs := range userIDToFileMap {
+		ctxLogger := log.WithFields(log.Fields{
+			"user_id": userID,
+			"fileIds": fileIDs,
+		})
+		ctxLogger.Info("start deleting old files from trash")
+		err = t.TrashRepo.Delete(context.Background(), userID, fileIDs)
+		if err != nil {
+			ctxLogger.WithError(err).Error("failed to delete file from trash")
+			continue
+		}
+		ctxLogger.Info("successfully deleted old files from trash")
+	}
+}
+
+// removeFilesWithVersion returns a filtered list of trashedFiles after removing all files with the given version.
+// Important: The method assumes that trashedFiles are sorted in increasing order of Trash.UpdatedAt
+func (t *TrashController) removeFilesWithVersion(trashedFiles []ente.Trash, version int64) []ente.Trash {
+	var i = len(trashedFiles) - 1
+	for ; i >= 0; i-- {
+		if trashedFiles[i].UpdatedAt != version {
+			// found index (from end) where file's version is different from given version
+			break
+		}
+	}
+	return trashedFiles[0 : i+1]
+}
+
+func (t *TrashController) trashCollection(item repo.QueueItem, queueName string, trashOnlyExclusiveFiles bool, logger *log.Entry) {
+	cID, _ := strconv.ParseInt(item.Item, 10, 64)
+	collection, err := t.CollectionRepo.Get(cID)
+	if err != nil {
+		log.Error("Could not fetch collection "+item.Item, err)
+		return
+	}
+	ctxLogger := logger.WithFields(log.Fields{
+		"collection_id": cID,
+		"user_id":       collection.Owner.ID,
+		"queue":         queueName,
+		"flow":          "trash_collection",
+	})
+	// to avoid race conditions while finding exclusive files, lock at the user level instead of the individual collection
+	lockName := fmt.Sprintf("CollectionTrash:%d", collection.Owner.ID)
+	lockStatus, err := t.TaskLockRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), t.HostName)
+	if err != nil || !lockStatus {
+		if err == nil {
+			ctxLogger.Error("lock is already taken for deleting collection")
+		} else {
+			ctxLogger.WithError(err).Error("critical: error while acquiring lock")
+		}
+		return
+	}
+	defer func() {
+		releaseErr := t.TaskLockRepo.ReleaseLock(lockName)
+		if releaseErr != nil {
+			ctxLogger.WithError(releaseErr).Error("Error while releasing lock")
+		}
+	}()
+	ctxLogger.Info("start trashing collection")
+	if trashOnlyExclusiveFiles {
+		err = t.CollectionRepo.TrashV2(cID, collection.Owner.ID)
+	} else {
+		err = t.CollectionRepo.TrashV3(context.Background(), cID)
+	}
+	if err != nil {
+		ctxLogger.WithError(err).Error("failed to trash collection")
+		return
+	}
+	err = t.QueueRepo.DeleteItem(queueName, item.Item)
+	if err != nil {
+		ctxLogger.WithError(err).Error("failed to delete item from queue")
+		return
+	}
+}
+
+func (t *TrashController) emptyTrash(item repo.QueueItem) {
+	lockName := fmt.Sprintf("EmptyTrash:%s", item.Item)
+	lockStatus, err := t.TaskLockRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), t.HostName)
+	split := strings.Split(item.Item, repo.EmptyTrashQueueItemSeparator)
+	userID, _ := strconv.ParseInt(split[0], 10, 64)
+	lastUpdateAt, _ := strconv.ParseInt(split[1], 10, 64)
+	ctxLogger := log.WithFields(log.Fields{
+		"user_id":       userID,
+		"lastUpdatedAt": lastUpdateAt,
+		"flow":          "empty_trash",
+	})
+
+	if err != nil || !lockStatus {
+		if err == nil {
+			// todo: error only when the lock has been held for more than X duration
+			ctxLogger.Error("lock is already taken for emptying trash")
} else { + ctxLogger.WithError(err).Error("critical: error while acquiring lock") + } + return + } + defer func() { + releaseErr := t.TaskLockRepo.ReleaseLock(lockName) + if releaseErr != nil { + log.WithError(releaseErr).Error("Error while releasing lock") + } + }() + + ctxLogger.Info("Start emptying trash") + fileIDs, err := t.TrashRepo.GetFilesIDsForDeletion(userID, lastUpdateAt) + if err != nil { + ctxLogger.WithError(err).Error("Failed to fetch fileIDs") + return + } + ctx := context.Background() + size := len(fileIDs) + limit := repo.TrashBatchSize + for lb := 0; lb < size; lb += limit { + ub := lb + limit + if ub > size { + ub = size + } + batch := fileIDs[lb:ub] + err = t.TrashRepo.Delete(ctx, userID, batch) + if err != nil { + ctxLogger.WithField("batchIDs", batch).WithError(err).Error("Failed while deleting batch") + return + } + } + err = t.QueueRepo.DeleteItem(repo.TrashEmptyQueue, item.Item) + if err != nil { + log.Error("Error while removing item from queue "+item.Item, err) + return + } + ctxLogger.Info("Finished emptying trash") +} diff --git a/server/pkg/controller/trash_file_metadata.go b/server/pkg/controller/trash_file_metadata.go new file mode 100644 index 000000000..e72bbfa09 --- /dev/null +++ b/server/pkg/controller/trash_file_metadata.go @@ -0,0 +1,94 @@ +package controller + +import ( + "context" + "fmt" + "strconv" + + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/time" + log "github.com/sirupsen/logrus" +) + +// DropFileMetadataCron removes the metadata for deleted files +func (t *TrashController) DropFileMetadataCron() { + ctx := context.Background() + lockName := "dropTrashedFileMetadata" + logger := log.WithField("cron", lockName) + if t.dropFileMetadataRunning { + logger.Info("already running") + return + } + t.dropFileMetadataRunning = true + defer func() { + t.dropFileMetadataRunning = false + }() + + lockStatus, err := t.TaskLockRepo.AcquireLock(lockName, time.MicrosecondsAfterHours(1), t.HostName) + if err != nil || !lockStatus { + logger.Error("Unable to acquire lock") + return + } + defer func() { + releaseErr := t.TaskLockRepo.ReleaseLock(lockName) + if releaseErr != nil { + logger.WithError(releaseErr).Error("Error while releasing lock") + } + }() + items, err := t.QueueRepo.GetItemsReadyForDeletion(repo.DropFileEncMedataQueue, 10) + if err != nil { + logger.WithError(err).Error("getItemsReadyForDeletion failed") + return + } + if len(items) == 0 { + logger.Info("add entry for dropping fileMetadata") + // insert entry with 0 as the last epochTime till when metadata is dropped. 
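+		// (The single queue item doubles as a persistent cursor: each run
+		// scrubs metadata for files trashed after the stored timestamp and
+		// then advances the item to the max updated_at it processed.)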
+		err = t.QueueRepo.InsertItem(context.Background(), repo.DropFileEncMedataQueue, "0")
+		if err != nil {
+			logger.WithError(err).Error("failed to insert entry")
+		}
+		return
+	}
+	if len(items) > 1 {
+		logger.Error(fmt.Sprintf("queue %s should not have more than one entry", repo.DropFileEncMedataQueue))
+	}
+	qItem := items[0]
+	droppedMetadataTill, parseErr := strconv.ParseInt(qItem.Item, 10, 64)
+	if parseErr != nil {
+		logger.WithError(parseErr).Error("failed to parse time")
+		return
+	}
+	fileIDsWithUpdatedAt, err := t.TrashRepo.GetFileIdsForDroppingMetadata(droppedMetadataTill)
+	if err != nil {
+		logger.Error("error during next items fetch", err)
+		return
+	}
+	if len(fileIDsWithUpdatedAt) == 0 {
+		logger.Info("no pending entry")
+		return
+	}
+	var maxUpdatedAt = int64(0)
+	fileIDs := make([]int64, 0)
+	for _, item := range fileIDsWithUpdatedAt {
+		fileIDs = append(fileIDs, item.FileID)
+		if item.UpdatedAt > maxUpdatedAt {
+			maxUpdatedAt = item.UpdatedAt
+		}
+	}
+	ctxLogger := logger.WithFields(log.Fields{
+		"maxUpdatedAt": maxUpdatedAt,
+		"fileIds":      fileIDs,
+	})
+	ctxLogger.Info("start dropping metadata")
+	err = t.FileRepo.DropFilesMetadata(ctx, fileIDs)
+	if err != nil {
+		ctxLogger.WithError(err).Error("failed to scrub data")
+		return
+	}
+	updateErr := t.QueueRepo.UpdateItem(ctx, repo.DropFileEncMedataQueue, qItem.Id, strconv.FormatInt(maxUpdatedAt, 10))
+	if updateErr != nil {
+		ctxLogger.WithError(updateErr).Error("failed to update queueItem")
+		return
+	}
+	ctxLogger.Info("successfully dropped metadata")
+}
diff --git a/server/pkg/controller/usage.go b/server/pkg/controller/usage.go
new file mode 100644
index 000000000..a05a10131
--- /dev/null
+++ b/server/pkg/controller/usage.go
@@ -0,0 +1,107 @@
+package controller
+
+import (
+	"context"
+	"errors"
+
+	"github.com/ente-io/museum/ente"
+	bonus "github.com/ente-io/museum/ente/storagebonus"
+	"github.com/ente-io/museum/pkg/controller/storagebonus"
+	"github.com/ente-io/museum/pkg/controller/usercache"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/stacktrace"
+)
+
+// UsageController exposes functions for checking storage usage and limits
+type UsageController struct {
+	BillingCtrl      *BillingController
+	StorageBonusCtrl *storagebonus.Controller
+	UserCacheCtrl    *usercache.Controller
+	UsageRepo        *repo.UsageRepository
+	UserRepo         *repo.UserRepository
+	FamilyRepo       *repo.FamilyRepository
+	FileRepo         *repo.FileRepository
+}
+
+const MaxLockerFiles = 10000
+
+// CanUploadFile returns an error if a file of the given size (counting the
+// StorageOverflowAboveSubscriptionLimit buffer) cannot be uploaded. If size
+// is not passed, it validates that the current usage is less than the
+// subscription storage.
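+//
+// A worked example with illustrative numbers: on a 100 GB plan with current
+// usage of 99 GB, a 2 GB upload makes newUsage = 101 GB, which is compared
+// against subStorage = 100 GB + StorageOverflowAboveSubscriptionLimit; only
+// if that check fails are the admin's storage bonuses (GetUsableBonus)
+// consulted before rejecting with ErrStorageLimitExceeded.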
+func (c *UsageController) CanUploadFile(ctx context.Context, userID int64, size *int64, app ente.App) error {
+	// If app is Locker, limit to MaxLockerFiles files
+	if app == ente.Locker {
+		// Enforce the cap only when the cached file count could actually be fetched
+		if fileCount, err := c.UserCacheCtrl.GetUserFileCountWithCache(userID, app); err == nil {
+			if fileCount >= MaxLockerFiles {
+				return stacktrace.Propagate(ente.ErrFileLimitReached, "")
+			}
+		}
+	}
+
+	familyAdminID, err := c.UserRepo.GetFamilyAdminID(userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	var subscriptionAdminID int64
+	var subscriptionUserIDs []int64
+	// if the user is part of a family group, validate that the family admin's subscription is valid
+	// and that the members' total storage is less than the storage offered by the admin's plan
+	if familyAdminID != nil {
+		familyMembers, err := c.FamilyRepo.GetMembersWithStatus(*familyAdminID, repo.ActiveFamilyMemberStatus)
+		if err != nil {
+			return stacktrace.Propagate(err, "failed to fetch family members")
+		}
+		subscriptionAdminID = *familyAdminID
+		for _, familyMember := range familyMembers {
+			subscriptionUserIDs = append(subscriptionUserIDs, familyMember.MemberUserID)
+		}
+	} else {
+		subscriptionAdminID = userID
+		subscriptionUserIDs = []int64{userID}
+	}
+
+	var subStorage int64
+	var activeBonus *bonus.ActiveStorageBonus
+	sub, err := c.BillingCtrl.GetActiveSubscription(subscriptionAdminID)
+	if err != nil {
+		subStorage = 0
+		if errors.Is(err, ente.ErrNoActiveSubscription) {
+			bonusRes, bonErr := c.UserCacheCtrl.GetActiveStorageBonus(ctx, subscriptionAdminID)
+			if bonErr != nil {
+				return stacktrace.Propagate(bonErr, "failed to get bonus data")
+			}
+			if bonusRes.GetMaxExpiry() <= 0 {
+				return stacktrace.Propagate(err, "all bonus & plan expired")
+			}
+			activeBonus = bonusRes
+		} else {
+			return stacktrace.Propagate(err, "")
+		}
+	} else {
+		subStorage = sub.Storage
+	}
+	usage, err := c.UsageRepo.GetCombinedUsage(ctx, subscriptionUserIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	newUsage := usage
+
+	if size != nil {
+		// Add the size of the file to be uploaded to the current usage, and the buffer to sub.Storage
+		newUsage += *size
+		subStorage += StorageOverflowAboveSubscriptionLimit
+	}
+	if newUsage > subStorage {
+		if activeBonus == nil {
+			// Check if the subAdmin has any storage bonus
+			activeBonus, err = c.UserCacheCtrl.GetActiveStorageBonus(ctx, subscriptionAdminID)
+			if err != nil {
+				return stacktrace.Propagate(err, "failed to get storage bonus")
+			}
+		}
+		var eligibleBonus = activeBonus.GetUsableBonus(subStorage)
+		if newUsage > (subStorage + eligibleBonus) {
+			return stacktrace.Propagate(ente.ErrStorageLimitExceeded, "")
+		}
+	}
+	return nil
+}
diff --git a/server/pkg/controller/user/jwt.go b/server/pkg/controller/user/jwt.go
new file mode 100644
index 000000000..d920e36b0
--- /dev/null
+++ b/server/pkg/controller/user/jwt.go
@@ -0,0 +1,50 @@
+package user
+
+import (
+	"fmt"
+
+	enteJWT "github.com/ente-io/museum/ente/jwt"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+	"github.com/golang-jwt/jwt"
+)
+
+// jwt token validity = 1 day
+const ValidForDays = 1
+
+func (c *UserController) GetJWTToken(userID int64, scope enteJWT.ClaimScope) (string, error) {
+	// Create a new token object, specifying the signing method and the claims
+	// you would like it to contain.
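+	// HS256 is symmetric: the same c.JwtSecret that signs here is handed
+	// back from the key lookup callback in ValidateJWTToken below, so tokens
+	// can only be minted and verified by this server.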
+	token := jwt.NewWithClaims(jwt.SigningMethodHS256, &enteJWT.WebCommonJWTClaim{
+		UserID:     userID,
+		ExpiryTime: time.NDaysFromNow(ValidForDays),
+		ClaimScope: &scope,
+	})
+	// Sign and get the complete encoded token as a string using the secret
+	tokenString, err := token.SignedString(c.JwtSecret)
+
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return tokenString, nil
+}
+
+func (c *UserController) ValidateJWTToken(jwtToken string, scope enteJWT.ClaimScope) (int64, error) {
+	token, err := jwt.ParseWithClaims(jwtToken, &enteJWT.WebCommonJWTClaim{}, func(token *jwt.Token) (interface{}, error) {
+		if _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {
+			return nil, stacktrace.Propagate(fmt.Errorf("unexpected signing method: %v", token.Header["alg"]), "")
+		}
+		return c.JwtSecret, nil
+	})
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "JWT parsing failed")
+	}
+	claims, ok := token.Claims.(*enteJWT.WebCommonJWTClaim)
+	if ok && token.Valid {
+		if claims.GetScope() != scope {
+			return -1, stacktrace.Propagate(fmt.Errorf("received claimScope %s is different from the expected scope: %s", claims.GetScope(), scope), "")
+		}
+		return claims.UserID, nil
+	}
+	return -1, stacktrace.Propagate(err, "JWT claim validation failed")
+}
diff --git a/server/pkg/controller/user/srp.go b/server/pkg/controller/user/srp.go
new file mode 100644
index 000000000..336611f64
--- /dev/null
+++ b/server/pkg/controller/user/srp.go
@@ -0,0 +1,229 @@
+package user
+
+import (
+	"context"
+	"net/http"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/stacktrace"
+	"github.com/gin-gonic/gin"
+	"github.com/google/uuid"
+	"github.com/kong/go-srp"
+	"github.com/sirupsen/logrus"
+)
+
+const Srp4096Params = 4096
+
+func (c *UserController) SetupSRP(context *gin.Context, userID int64, req ente.SetupSRPRequest) (*ente.SetupSRPResponse, error) {
+	srpB, sessionID, err := c.createAndInsertSRPSession(context, req.SrpUserID, req.SRPVerifier, req.SRPA)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	setupID, err := c.UserAuthRepo.InsertTempSRPSetup(context, req, userID, sessionID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to add entry in setup table")
+	}
+
+	return &ente.SetupSRPResponse{
+		SetupID: *setupID,
+		SRPB:    *srpB,
+	}, nil
+}
+
+func (c *UserController) CompleteSRPSetup(context *gin.Context, req ente.CompleteSRPSetupRequest) (*ente.CompleteSRPSetupResponse, error) {
+	userID := auth.GetUserID(context.Request.Header)
+	setup, err := c.UserAuthRepo.GetTempSRPSetupEntity(context, req.SetupID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	srpM2, err := c.verifySRPSession(context, setup.Verifier, setup.SessionID, req.SRPM1)
+	if err != nil {
+		return nil, err
+	}
+	err = c.UserAuthRepo.InsertSRPAuth(context, userID, setup.SRPUserID, setup.Verifier, setup.Salt)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to add entry in srp auth")
+	}
+	return &ente.CompleteSRPSetupResponse{
+		SetupID: req.SetupID,
+		SRPM2:   *srpM2,
+	}, nil
+}
+
+// UpdateSrpAndKeyAttributes updates the SRP and key attributes once the SRP setup has completed successfully
+func (c *UserController) UpdateSrpAndKeyAttributes(context *gin.Context,
+	userID int64,
+	req ente.UpdateSRPAndKeysRequest,
+	shouldClearTokens bool,
+) (*ente.UpdateSRPSetupResponse, error) {
+	setup, err := c.UserAuthRepo.GetTempSRPSetupEntity(context, req.SetupID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	srpM2, err :=
c.verifySRPSession(context, setup.Verifier, setup.SessionID, req.SRPM1) + if err != nil { + return nil, err + } + err = c.UserAuthRepo.InsertOrUpdateSRPAuthAndKeyAttr(context, userID, req, setup) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to add entry in srp auth") + } + + if shouldClearTokens { + token := auth.GetToken(context) + err = c.UserAuthRepo.RemoveAllOtherTokens(userID, token) + if err != nil { + return nil, err + } + } else { + logrus.WithField("user_id", userID).Info("not clearing tokens") + } + + return &ente.UpdateSRPSetupResponse{ + SetupID: req.SetupID, + SRPM2: *srpM2, + }, nil +} + +func (c *UserController) GetSRPAttributes(context *gin.Context, email string) (*ente.GetSRPAttributesResponse, error) { + userID, err := c.UserRepo.GetUserIDWithEmail(email) + if err != nil { + return nil, stacktrace.Propagate(err, "user does not exist") + } + srpAttributes, err := c.UserAuthRepo.GetSRPAttributes(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return srpAttributes, nil +} + +func (c *UserController) CreateSrpSession(context *gin.Context, req ente.CreateSRPSessionRequest) (*ente.CreateSRPSessionResponse, error) { + + srpAuthEntity, err := c.UserAuthRepo.GetSRPAuthEntityBySRPUserID(context, req.SRPUserID) + if err != nil { + return nil, err + } + isEmailMFAEnabled, err := c.UserAuthRepo.IsEmailMFAEnabled(context, srpAuthEntity.UserID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + if *isEmailMFAEnabled { + return nil, stacktrace.Propagate(&ente.ApiError{ + Code: "EMAIL_MFA_ENABLED", + Message: "Email MFA is enabled", + HttpStatusCode: http.StatusConflict, + }, "email mfa is enabled") + } + + srpBBase64, sessionID, err := c.createAndInsertSRPSession(context, req.SRPUserID, srpAuthEntity.Verifier, req.SRPA) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return &ente.CreateSRPSessionResponse{ + SRPB: *srpBBase64, + SessionID: *sessionID, + }, nil +} + +func (c *UserController) VerifySRPSession(context *gin.Context, req ente.VerifySRPSessionRequest) (*ente.EmailAuthorizationResponse, error) { + srpAuthEntity, err := c.UserAuthRepo.GetSRPAuthEntityBySRPUserID(context, req.SRPUserID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + srpM2, err := c.verifySRPSession(context, srpAuthEntity.Verifier, req.SessionID, req.SRPM1) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + user, err := c.UserRepo.Get(srpAuthEntity.UserID) + if err != nil { + return nil, err + } + verResponse, err := c.onVerificationSuccess(context, user.Email, nil) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + verResponse.SrpM2 = srpM2 + return &verResponse, nil +} + +func (c *UserController) createAndInsertSRPSession( + gContext *gin.Context, + srpUserID uuid.UUID, + srpVerifier string, + srpA string, +) (*string, *uuid.UUID, error) { + + serverSecret := srp.GenKey() + srpParams := srp.GetParams(Srp4096Params) + srpServer := srp.NewServer(srpParams, convertStringToBytes(srpVerifier), serverSecret) + + if srpServer == nil { + return nil, nil, stacktrace.NewError("server is nil") + } + + srpServer.SetA(convertStringToBytes(srpA)) + + srpB := srpServer.ComputeB() + + if srpB == nil { + return nil, nil, stacktrace.NewError("srpB is nil") + } + + sessionID, err := c.UserAuthRepo.AddSRPSession(srpUserID, convertBytesToString(serverSecret), srpA) + + if err != nil { + return nil, nil, stacktrace.Propagate(err, "") + } + + srpBBase64 := convertBytesToString(srpB) + return 
&srpBBase64, &sessionID, nil
+}
+
+func (c *UserController) verifySRPSession(ctx context.Context,
+ srpVerifier string,
+ sessionID uuid.UUID,
+ srpM1 string,
+) (*string, error) {
+ srpSession, err := c.UserAuthRepo.GetSrpSessionEntity(ctx, sessionID)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ if srpSession.IsVerified {
+ return nil, stacktrace.Propagate(&ente.ApiError{
+ Code: "SESSION_ALREADY_VERIFIED",
+ HttpStatusCode: http.StatusGone,
+ }, "")
+ } else if srpSession.AttemptCount >= 5 {
+ return nil, stacktrace.Propagate(&ente.ApiError{
+ Code: "TOO_MANY_WRONG_ATTEMPTS",
+ HttpStatusCode: http.StatusGone,
+ }, "")
+ }
+
+ srpParams := srp.GetParams(Srp4096Params)
+ srpServer := srp.NewServer(srpParams, convertStringToBytes(srpVerifier), convertStringToBytes(srpSession.ServerKey))
+
+ if srpServer == nil {
+ return nil, stacktrace.NewError("server is nil")
+ }
+
+ srpServer.SetA(convertStringToBytes(srpSession.SRP_A))
+
+ srpM2Bytes, err := srpServer.CheckM1(convertStringToBytes(srpM1))
+
+ if err != nil {
+ err2 := c.UserAuthRepo.IncrementSrpSessionAttemptCount(ctx, sessionID)
+ if err2 != nil {
+ return nil, stacktrace.Propagate(err2, "")
+ }
+ return nil, stacktrace.Propagate(ente.ErrInvalidPassword, "failed to verify srp session")
+ } else {
+ err2 := c.UserAuthRepo.SetSrpSessionVerified(ctx, sessionID)
+ if err2 != nil {
+ return nil, stacktrace.Propagate(err2, "")
+ }
+ }
+ srpM2 := convertBytesToString(srpM2Bytes)
+ return &srpM2, nil
+}
diff --git a/server/pkg/controller/user/twofactor.go b/server/pkg/controller/user/twofactor.go
new file mode 100644
index 000000000..ac5473b06
--- /dev/null
+++ b/server/pkg/controller/user/twofactor.go
@@ -0,0 +1,200 @@
+package user
+
+import (
+ "bytes"
+ "encoding/base64"
+ "image/png"
+
+ "github.com/ente-io/museum/pkg/utils/network"
+
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/museum/pkg/utils/auth"
+ "github.com/ente-io/museum/pkg/utils/crypto"
+ "github.com/ente-io/museum/pkg/utils/time"
+ "github.com/ente-io/stacktrace"
+ "github.com/gin-gonic/gin"
+ "github.com/pquerna/otp/totp"
+)
+
+// SetupTwoFactor generates a two factor secret and sends it to the user to set up their authenticator app
+func (c *UserController) SetupTwoFactor(userID int64) (ente.TwoFactorSecret, error) {
+ user, err := c.UserRepo.Get(userID)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ if _, keyErr := c.UserRepo.GetKeyAttributes(userID); keyErr != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(keyErr, "User keys setup is not completed")
+ }
+ key, err := totp.Generate(totp.GenerateOpts{Issuer: TOTPIssuerORG, AccountName: user.Email})
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ encryptedSecret, err := crypto.Encrypt(key.Secret(), c.SecretEncryptionKey)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ secretHash, err := crypto.GetHash(key.Secret(), c.HashingKey)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ err = c.TwoFactorRepo.SetTempTwoFactorSecret(userID, encryptedSecret, secretHash, time.Microseconds()+TwoFactorValidityDurationInMicroSeconds)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ buf := new(bytes.Buffer)
+ img, err := key.Image(200, 200)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ err = png.Encode(buf, img)
+ if err != nil {
+ return ente.TwoFactorSecret{}, stacktrace.Propagate(err, "")
+ }
+ return ente.TwoFactorSecret{SecretCode: key.Secret(), QRCode: base64.StdEncoding.EncodeToString(buf.Bytes())}, nil
+}
+
+// EnableTwoFactor handles the two factor activation request after the user has set up two factor, by validating a TOTP request
+func (c *UserController) EnableTwoFactor(userID int64, request ente.TwoFactorEnableRequest) error {
+ encryptedSecrets, hashedSecrets, err := c.TwoFactorRepo.GetTempTwoFactorSecret(userID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ valid := false
+ validSecret := ""
+ var validEncryptedSecret ente.EncryptionResult
+ var validSecretHash string
+ for index, encryptedSecret := range encryptedSecrets {
+ secret, err := crypto.Decrypt(encryptedSecret.Cipher, c.SecretEncryptionKey, encryptedSecret.Nonce)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ valid = totp.Validate(request.Code, secret)
+ if valid {
+ validSecret = secret
+ validEncryptedSecret = encryptedSecret
+ validSecretHash = hashedSecrets[index]
+ break
+ }
+ }
+ if !valid {
+ return stacktrace.Propagate(ente.ErrIncorrectTOTP, "")
+ }
+ err = c.UserRepo.SetTwoFactorSecret(userID, validEncryptedSecret, validSecretHash, request.EncryptedTwoFactorSecret, request.TwoFactorSecretDecryptionNonce)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = c.TwoFactorRepo.UpdateTwoFactorStatus(userID, true)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ secretHash, err := crypto.GetHash(validSecret, c.HashingKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = c.TwoFactorRepo.RemoveTempTwoFactorSecret(secretHash)
+ return stacktrace.Propagate(err, "")
+}
+
+// VerifyTwoFactor handles the two factor validation request
+func (c *UserController) VerifyTwoFactor(context *gin.Context, sessionID string, otp string) (ente.TwoFactorAuthorizationResponse, error) {
+ userID, err := c.TwoFactorRepo.GetUserIDWithTwoFactorSession(sessionID)
+ if err != nil {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "")
+ }
+ isTwoFactorEnabled, err := c.UserRepo.IsTwoFactorEnabled(userID)
+ if err != nil {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "")
+ }
+ if !isTwoFactorEnabled {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(ente.ErrBadRequest, "")
+ }
+ secret, err := c.TwoFactorRepo.GetTwoFactorSecret(userID)
+ if err != nil {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "")
+ }
+ valid := totp.Validate(otp, secret)
+ if !valid {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(ente.ErrIncorrectTOTP, "")
+ }
+ response, err := c.GetKeyAttributeAndToken(context, userID)
+ if err != nil {
+ return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "")
+ }
+ return response, nil
+}
+
+// DisableTwoFactor disables the two factor authentication for a user
+func (c *UserController) DisableTwoFactor(userID int64) error {
+ err := c.TwoFactorRepo.UpdateTwoFactorStatus(userID, false)
+ return stacktrace.Propagate(err, "")
+}
+
+// RecoverTwoFactor handles the two factor recovery request by sending the
+// recoveryKeyEncryptedTwoFactorSecret, which the user can decrypt in order to make the twoFactor removal API call
+func (c *UserController) RecoverTwoFactor(sessionID string) (ente.TwoFactorRecoveryResponse, error) {
+ userID, err := c.TwoFactorRepo.GetUserIDWithTwoFactorSession(sessionID)
+ if err != nil {
+ return ente.TwoFactorRecoveryResponse{}, stacktrace.Propagate(err, "")
+
} + response, err := c.TwoFactorRepo.GetRecoveryKeyEncryptedTwoFactorSecret(userID) + if err != nil { + return ente.TwoFactorRecoveryResponse{}, stacktrace.Propagate(err, "") + } + return response, nil +} + +// RemoveTwoFactor handles two factor deactivation request if user lost his device +// by authenticating him using his twoFactorsessionToken and twoFactor secret +func (c *UserController) RemoveTwoFactor(context *gin.Context, sessionID string, secret string) (ente.TwoFactorAuthorizationResponse, error) { + userID, err := c.TwoFactorRepo.GetUserIDWithTwoFactorSession(sessionID) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + secretHash, err := crypto.GetHash(secret, c.HashingKey) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + + } + exists, err := c.TwoFactorRepo.VerifyTwoFactorSecret(userID, secretHash) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + + } + if !exists { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(ente.ErrPermissionDenied, "") + } + err = c.TwoFactorRepo.UpdateTwoFactorStatus(userID, false) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + response, err := c.GetKeyAttributeAndToken(context, userID) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return response, nil +} + +func (c *UserController) GetKeyAttributeAndToken(context *gin.Context, userID int64) (ente.TwoFactorAuthorizationResponse, error) { + keyAttributes, err := c.UserRepo.GetKeyAttributes(userID) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + token, err := auth.GenerateURLSafeRandomString(TokenLength) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + encryptedToken, err := crypto.GetEncryptedToken(token, keyAttributes.PublicKey) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + err = c.UserAuthRepo.AddToken(userID, auth.GetApp(context), + token, network.GetClientIP(context), context.Request.UserAgent()) + if err != nil { + return ente.TwoFactorAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return ente.TwoFactorAuthorizationResponse{ + ID: userID, + KeyAttributes: &keyAttributes, + EncryptedToken: encryptedToken, + }, nil +} diff --git a/server/pkg/controller/user/user.go b/server/pkg/controller/user/user.go new file mode 100644 index 000000000..4be02b24f --- /dev/null +++ b/server/pkg/controller/user/user.go @@ -0,0 +1,411 @@ +package user + +import ( + "errors" + "fmt" + "strings" + + cache2 "github.com/ente-io/museum/ente/cache" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/controller/usercache" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/controller/family" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/repo/datacleanup" + "github.com/ente-io/museum/pkg/repo/passkey" + storageBonusRepo "github.com/ente-io/museum/pkg/repo/storagebonus" + "github.com/ente-io/museum/pkg/utils/billing" + "github.com/ente-io/museum/pkg/utils/crypto" + "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/golang-jwt/jwt" + "github.com/patrickmn/go-cache" + 
"github.com/sirupsen/logrus" + "github.com/spf13/viper" +) + +// UserController exposes request handlers for all user related requests +type UserController struct { + UserRepo *repo.UserRepository + UsageRepo *repo.UsageRepository + UserAuthRepo *repo.UserAuthRepository + TwoFactorRepo *repo.TwoFactorRepository + PasskeyRepo *passkey.Repository + StorageBonusRepo *storageBonusRepo.Repository + FileRepo *repo.FileRepository + CollectionRepo *repo.CollectionRepository + DataCleanupRepo *datacleanup.Repository + CollectionCtrl *controller.CollectionController + BillingRepo *repo.BillingRepository + BillingController *controller.BillingController + FamilyController *family.Controller + DiscordController *discord.DiscordController + MailingListsController *controller.MailingListsController + PushController *controller.PushController + HashingKey []byte + SecretEncryptionKey []byte + JwtSecret []byte + Cache *cache.Cache // refers to the auth token cache + HardCodedOTT HardCodedOTT + roadmapURLPrefix string + roadmapSSOSecret string + UserCache *cache2.UserCache + UserCacheController *usercache.Controller +} + +const ( + // OTTValidityDurationInMicroSeconds is the duration for which an OTT is valid + // (60 minutes) + OTTValidityDurationInMicroSeconds = 60 * 60 * 1000000 + + // OTTWrongAttemptLimit is the max number of wrong attempt to verify OTT (to prevent bruteforce guessing) + // When client hits this limit, they will need to trigger new OTT. + OTTWrongAttemptLimit = 20 + + // OTTActiveCodeLimit is the max number of active OTT a user can have in + // a time window of OTTValidityDurationInMicroSeconds duration + OTTActiveCodeLimit = 10 + + // TwoFactorValidityDurationInMicroSeconds is the duration for which an OTT is valid + // (10 minutes) + TwoFactorValidityDurationInMicroSeconds = 10 * 60 * 1000000 + + // TokenLength is the length of the token issued to a verified user + TokenLength = 32 + + // TwoFactorSessionIDLength is the length of the twoFactorSessionID issued to a verified user + TwoFactorSessionIDLength = 32 + + // PassKeySessionIDLength is the length of the passKey sessionID issued to a verified user + PassKeySessionIDLength = 32 + + CryptoPwhashMemLimitInteractive = 67108864 + CryptoPwhashOpsLimitInteractive = 2 + + TOTPIssuerORG = "ente" + + // Template and subject for the mail that we send when the user deletes + // their account. 
+ AccountDeletedEmailTemplate = "account_deleted.html" + AccountDeletedWithActiveSubscriptionEmailTemplate = "account_deleted_active_sub.html" + AccountDeletedEmailSubject = "Your ente account has been deleted" +) + +func NewUserController( + userRepo *repo.UserRepository, + usageRepo *repo.UsageRepository, + userAuthRepo *repo.UserAuthRepository, + twoFactorRepo *repo.TwoFactorRepository, + passkeyRepo *passkey.Repository, + storageBonusRepo *storageBonusRepo.Repository, + fileRepo *repo.FileRepository, + collectionController *controller.CollectionController, + collectionRepo *repo.CollectionRepository, + dataCleanupRepository *datacleanup.Repository, + billingRepo *repo.BillingRepository, + secretEncryptionKeyBytes []byte, + hashingKeyBytes []byte, + authCache *cache.Cache, + jwtSecretBytes []byte, + billingController *controller.BillingController, + familyController *family.Controller, + discordController *discord.DiscordController, + mailingListsController *controller.MailingListsController, + pushController *controller.PushController, + userCache *cache2.UserCache, + userCacheController *usercache.Controller, +) *UserController { + return &UserController{ + UserRepo: userRepo, + UsageRepo: usageRepo, + UserAuthRepo: userAuthRepo, + StorageBonusRepo: storageBonusRepo, + TwoFactorRepo: twoFactorRepo, + PasskeyRepo: passkeyRepo, + FileRepo: fileRepo, + CollectionCtrl: collectionController, + CollectionRepo: collectionRepo, + DataCleanupRepo: dataCleanupRepository, + BillingRepo: billingRepo, + SecretEncryptionKey: secretEncryptionKeyBytes, + HashingKey: hashingKeyBytes, + Cache: authCache, + JwtSecret: jwtSecretBytes, + BillingController: billingController, + FamilyController: familyController, + DiscordController: discordController, + MailingListsController: mailingListsController, + PushController: pushController, + HardCodedOTT: ReadHardCodedOTTFromConfig(), + roadmapURLPrefix: viper.GetString("roadmap.url-prefix"), + roadmapSSOSecret: viper.GetString("roadmap.sso-secret"), + UserCache: userCache, + UserCacheController: userCacheController, + } +} + +// GetAttributes returns the key attributes for a user +func (c *UserController) GetAttributes(userID int64) (ente.KeyAttributes, error) { + return c.UserRepo.GetKeyAttributes(userID) +} + +// SetAttributes sets the attributes for a user. The request will fail if key attributes are already set +func (c *UserController) SetAttributes(userID int64, request ente.SetUserAttributesRequest) error { + _, err := c.UserRepo.GetKeyAttributes(userID) + if err == nil { // If there are key attributes already set + return stacktrace.Propagate(ente.ErrPermissionDenied, "key attributes are already set") + } + if request.KeyAttributes.MemLimit <= 0 || request.KeyAttributes.OpsLimit <= 0 { + // note for curious soul in the future + _ = fmt.Sprintf("Older clients were not passing these values, so server used %d & %d as ops and memLimit", + CryptoPwhashOpsLimitInteractive, CryptoPwhashMemLimitInteractive) + return stacktrace.Propagate(ente.ErrBadRequest, "mem or ops limit should be > 0") + } + err = c.UserRepo.SetKeyAttributes(userID, request.KeyAttributes) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// UpdateEmailMFA updates the email MFA for a user. 
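+// Email MFA can only be disabled once SRP has been set up; without SRP, the
+// email OTT is the only way the user can authenticate (see the check below).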
+func (c *UserController) UpdateEmailMFA(context *gin.Context, userID int64, isEnabled bool) error { + if !isEnabled { + isSrpSetupDone, err := c.UserAuthRepo.IsSRPSetupDone(context, userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + // if SRP is not setup, then we can not disable email MFA + if !isSrpSetupDone { + return stacktrace.Propagate(ente.NewConflictError("SRP setup incomplete"), "can not disable email MFA before SRP is setup") + } + } + return c.UserAuthRepo.UpdateEmailMFA(context, userID, isEnabled) +} + +// UpdateKeys updates the user keys on password change +func (c *UserController) UpdateKeys(context *gin.Context, userID int64, + request ente.UpdateKeysRequest, token string) error { + /* + todo: send email to the user on password change and may be keep history of old keys for X days. + History will allow easy recovery of the account when password is changed by a bad actor + */ + isSRPSetupDone, err := c.UserAuthRepo.IsSRPSetupDone(context, userID) + if err != nil { + return err + } + if isSRPSetupDone { + return stacktrace.Propagate(ente.NewBadRequestWithMessage("Need to upgrade client"), "can not use old API to change password after SRP is setup") + } + err = c.UserRepo.UpdateKeys(userID, request) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = c.UserAuthRepo.RemoveAllOtherTokens(userID, token) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// SetRecoveryKey sets the recovery key attributes for a user, if not already set +func (c *UserController) SetRecoveryKey(userID int64, request ente.SetRecoveryKeyRequest) error { + keyAttr, keyErr := c.UserRepo.GetKeyAttributes(userID) + if keyErr != nil { + return stacktrace.Propagate(keyErr, "User keys setup is not completed") + } + if keyAttr.RecoveryKeyEncryptedWithMasterKey != "" { + return stacktrace.Propagate(errors.New("recovery key is already set"), "") + } + err := c.UserRepo.SetRecoveryKeyAttributes(userID, request) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// GetPublicKey returns the public key of a user +func (c *UserController) GetPublicKey(email string) (string, error) { + userID, err := c.UserRepo.GetUserIDWithEmail(email) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + key, err := c.UserRepo.GetPublicKey(userID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + return key, nil +} + +// GetRoadmapURL redirects the user to the feedback page +func (c *UserController) GetRoadmapURL(userID int64) (string, error) { + // If SSO is not configured, redirect the user to the plain roadmap + if c.roadmapURLPrefix == "" || c.roadmapSSOSecret == "" { + return "https://roadmap.ente.io", nil + } + user, err := c.UserRepo.Get(userID) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + userData := jwt.MapClaims{ + "full_name": "", + "email": user.Hash + "@ente.io", + } + token := jwt.NewWithClaims(jwt.SigningMethodHS256, userData) + signature, err := token.SignedString([]byte(c.roadmapSSOSecret)) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + return c.roadmapURLPrefix + signature, nil +} + +// GetTwoFactorStatus returns a user's two factor status +func (c *UserController) GetTwoFactorStatus(userID int64) (bool, error) { + isTwoFactorEnabled, err := c.UserRepo.IsTwoFactorEnabled(userID) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + return isTwoFactorEnabled, nil +} + +func (c *UserController) HandleAccountDeletion(ctx *gin.Context, 
userID int64, logger *logrus.Entry) (*ente.DeleteAccountResponse, error) { + isSubscriptionCancelled, err := c.BillingController.HandleAccountDeletion(ctx, userID, logger) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + err = c.CollectionCtrl.HandleAccountDeletion(ctx, userID, logger) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + err = c.FamilyController.HandleAccountDeletion(ctx, userID, logger) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + logger.Info("remove push tokens for user") + c.PushController.RemoveTokensForUser(userID) + + logger.Info("remove active tokens for user") + err = c.UserAuthRepo.RemoveAllTokens(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + user, err := c.UserRepo.Get(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + email := user.Email + // See also: Do not block on mailing list errors + go func() { + _ = c.MailingListsController.Unsubscribe(email) + }() + + logger.Info("mark user as deleted") + err = c.UserRepo.Delete(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + logger.Info("schedule data deletion") + err = c.DataCleanupRepo.Insert(ctx, userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + + go c.NotifyAccountDeletion(email, isSubscriptionCancelled) + + return &ente.DeleteAccountResponse{ + IsSubscriptionCancelled: isSubscriptionCancelled, + UserID: userID, + }, nil + +} + +func (c *UserController) NotifyAccountDeletion(userEmail string, isSubscriptionCancelled bool) { + template := AccountDeletedEmailTemplate + if !isSubscriptionCancelled { + template = AccountDeletedWithActiveSubscriptionEmailTemplate + } + err := email.SendTemplatedEmail([]string{userEmail}, "ente", "team@ente.io", + AccountDeletedEmailSubject, template, nil, nil) + if err != nil { + logrus.WithError(err).Errorf("Failed to send the account deletion email to %s", userEmail) + } +} + +func (c *UserController) HandleAccountRecovery(ctx *gin.Context, req ente.RecoverAccountRequest) error { + _, err := c.UserRepo.Get(req.UserID) + if err == nil { + return stacktrace.Propagate(ente.NewBadRequestError(&ente.ApiErrorParams{ + Message: "User ID is linked to undeleted account", + }), "") + } + if !errors.Is(err, ente.ErrUserDeleted) { + return stacktrace.Propagate(err, "error while getting the user") + } + // check if the user keyAttributes are still available + if _, keyErr := c.UserRepo.GetKeyAttributes(req.UserID); keyErr != nil { + return stacktrace.Propagate(keyErr, "keyAttributes missing? 
Account can not be recovered") + } + email := strings.ToLower(req.EmailID) + encryptedEmail, err := crypto.Encrypt(email, c.SecretEncryptionKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + emailHash, err := crypto.GetHash(email, c.HashingKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = c.UserRepo.UpdateEmail(req.UserID, encryptedEmail, emailHash) + return stacktrace.Propagate(err, "failed to update email") +} + +func (c *UserController) attachFreeSubscription(userID int64) (ente.Subscription, error) { + subscription := billing.GetFreeSubscription(userID) + generatedID, err := c.BillingRepo.AddSubscription(subscription) + if err != nil { + return subscription, stacktrace.Propagate(err, "") + } + subscription.ID = generatedID + return subscription, nil +} + +func (c *UserController) createUser(email string, source *string) (int64, ente.Subscription, error) { + encryptedEmail, err := crypto.Encrypt(email, c.SecretEncryptionKey) + if err != nil { + return -1, ente.Subscription{}, stacktrace.Propagate(err, "") + } + emailHash, err := crypto.GetHash(email, c.HashingKey) + if err != nil { + return -1, ente.Subscription{}, stacktrace.Propagate(err, "") + } + userID, err := c.UserRepo.Create(encryptedEmail, emailHash, source) + if err != nil { + return -1, ente.Subscription{}, stacktrace.Propagate(err, "") + } + err = c.UsageRepo.Create(userID) + if err != nil { + return -1, ente.Subscription{}, stacktrace.Propagate(err, "failed to add entry in usage") + } + subscription, err := c.attachFreeSubscription(userID) + if err != nil { + return -1, ente.Subscription{}, stacktrace.Propagate(err, "") + } + // Do not block on mailing list errors + // + // The mailing lists are hosted on a third party (Zoho), so we do not wish + // to fail user creation in case Zoho is having temporary issues. So we + // perform these actions async, and ignore errors that happen with them (a + // notification will be sent to Discord for those). + go func() { + _ = c.MailingListsController.Subscribe(email) + }() + return userID, subscription, nil +} diff --git a/server/pkg/controller/user/user_delete.go b/server/pkg/controller/user/user_delete.go new file mode 100644 index 000000000..5bbf8ce0b --- /dev/null +++ b/server/pkg/controller/user/user_delete.go @@ -0,0 +1,91 @@ +package user + +import ( + "encoding/base64" + + "github.com/ente-io/museum/ente" + enteJWT "github.com/ente-io/museum/ente/jwt" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/crypto" + "github.com/ente-io/stacktrace" + "github.com/gin-contrib/requestid" + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" +) + +func (c *UserController) GetDeleteChallengeToken(ctx *gin.Context) (*ente.DeleteChallengeResponse, error) { + userID := auth.GetUserID(ctx.Request.Header) + user, err := c.UserRepo.Get(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + keyAttributes, err := c.UserRepo.GetKeyAttributes(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + logger := logrus.WithFields(logrus.Fields{ + "user_id": userID, + "user_email": user.Email, + "req_id": requestid.Get(ctx), + "req_ctx": "request_self_delete", + }) + logger.Info("User initiated self-delete") + subscription, err := c.BillingController.GetSubscription(ctx, userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + /* todo: add check to see if there's pending abuse report or if user's master password + was changed in last X days. 
+ */ + shouldNotifyDiscord := subscription.ProductID != ente.FreePlanProductID + if shouldNotifyDiscord { + go c.DiscordController.NotifyAccountDelete(user.ID, string(subscription.PaymentProvider), subscription.ProductID) + } + token, err := c.GetJWTToken(userID, enteJWT.DELETE_ACCOUNT) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + encryptedToken, err := crypto.GetEncryptedToken(base64.StdEncoding.EncodeToString([]byte(token)), keyAttributes.PublicKey) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return &ente.DeleteChallengeResponse{ + EncryptedChallenge: &encryptedToken, + AllowDelete: true, + }, nil +} + +func (c *UserController) SelfDeleteAccount(ctx *gin.Context, req ente.DeleteAccountRequest) (*ente.DeleteAccountResponse, error) { + userID := auth.GetUserID(ctx.Request.Header) + tokenUserID, err := c.ValidateJWTToken(req.Challenge, enteJWT.DELETE_ACCOUNT) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to validate jwt token") + } + if tokenUserID != userID { + return nil, stacktrace.Propagate(ente.ErrPermissionDenied, "jwtToken belongs to different user") + } + user, err := c.UserRepo.Get(userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + _, err = c.BillingController.GetSubscription(ctx, userID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + logger := logrus.WithFields(logrus.Fields{ + "user_id": userID, + "user_email": user.Email, + "req_id": requestid.Get(ctx), + "req_ctx": "self_account_deletion", + }) + resp, err := c.HandleAccountDeletion(ctx, userID, logger) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + // Update reason, ignore failure in updating reason + updateErr := c.UserRepo.UpdateDeleteFeedback(userID, req.GetReasonAttr()) + if updateErr != nil { + logger.WithError(updateErr).Error("failed to update delete feedback") + } + return resp, nil +} diff --git a/server/pkg/controller/user/user_details.go b/server/pkg/controller/user/user_details.go new file mode 100644 index 000000000..08d3ad016 --- /dev/null +++ b/server/pkg/controller/user/user_details.go @@ -0,0 +1,170 @@ +package user + +import ( + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/ente/details" + bonus "github.com/ente-io/museum/ente/storagebonus" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/recover" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +func (c *UserController) GetDetails(ctx *gin.Context) (details.UserDetailsResponse, error) { + + enteApp := ctx.MustGet("app").(ente.App) + + userID := auth.GetUserID(ctx.Request.Header) + user, err := c.UserRepo.Get(userID) + if err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + usage, err := c.FileRepo.GetUsage(userID) + if err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + fileCount, err := c.FileRepo.GetFileCountForUser(userID, enteApp) + if err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + sharedCollectionsCount, err := c.CollectionRepo.GetSharedCollectionsCount(userID) + if err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + subscription, err := c.BillingController.GetSubscription(ctx, userID) + if err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + return details.UserDetailsResponse{ + Email: 
user.Email, + Usage: usage, + FileCount: &fileCount, + SharedCollectionsCount: &sharedCollectionsCount, + Subscription: subscription, + }, nil +} + +func (c *UserController) getUserFileCountWithCache(userID int64, app ente.App) (int64, error) { + // Check if the value is present in the cache + if count, ok := c.UserCache.GetFileCount(userID, app); ok { + // Cache hit, update the cache asynchronously + go func() { + _, _ = c.getUserCountAndUpdateCache(userID, app) + }() + return count, nil + } + return c.getUserCountAndUpdateCache(userID, app) +} + +func (c *UserController) getUserCountAndUpdateCache(userID int64, app ente.App) (int64, error) { + count, err := c.FileRepo.GetFileCountForUser(userID, app) + if err != nil { + return 0, stacktrace.Propagate(err, "") + } + c.UserCache.SetFileCount(userID, count, app) + return count, nil +} + +func (c *UserController) GetDetailsV2(ctx *gin.Context, userID int64, fetchMemoryCount bool, app ente.App) (details.UserDetailsResponse, error) { + + g := new(errgroup.Group) + var user *ente.User + var familyData *ente.FamilyMemberResponse + var subscription *ente.Subscription + var canDisableEmailMFA bool + var fileCount, sharedCollectionCount, usage int64 + var bonus *bonus.ActiveStorageBonus + g.Go(func() error { + resp, err := c.UserRepo.Get(userID) + if err != nil { + return stacktrace.Propagate(err, "failed to get user") + } + user = &resp + bonusUserId := userID + if user.FamilyAdminID != nil { + bonusUserId = *user.FamilyAdminID + familyDataResp, familyErr := c.FamilyController.FetchMembersForAdminID(ctx, *user.FamilyAdminID) + if familyErr != nil { + return stacktrace.Propagate(familyErr, "") + } + familyData = &familyDataResp + } + bonusValue, bonusErr := c.UserCacheController.GetActiveStorageBonus(ctx, bonusUserId) + if bonusErr != nil { + return stacktrace.Propagate(bonusErr, "failed to fetch storage bonus") + } + bonus = bonusValue + return nil + }) + + g.Go(func() error { + subResp, err := c.BillingController.GetSubscription(ctx, userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + subscription = &subResp + return nil + }) + g.Go(func() error { + isSRPSetupDone, err := c.UserAuthRepo.IsSRPSetupDone(ctx, userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + canDisableEmailMFA = isSRPSetupDone + return nil + }) + g.Go(func() error { + return recover.Int64ToInt64RecoverWrapper(userID, c.FileRepo.GetUsage, &usage) + }) + + if fetchMemoryCount { + g.Go(func() error { + fCount, err := c.getUserFileCountWithCache(userID, app) + if err == nil { + fileCount = fCount + } + + return err + }) + } + + // g.Wait waits for all goroutines to complete + // and returns the first non-nil error returned + // by one of the goroutines. 
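+ // Note: this is a plain errgroup.Group rather than errgroup.WithContext,
+ // so a failure in one goroutine does not cancel the others; Wait simply
+ // blocks until every goroutine has returned.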
+ if err := g.Wait(); err != nil { + return details.UserDetailsResponse{}, stacktrace.Propagate(err, "") + } + var planStoreForBonusComputation = subscription.Storage + if subscription.ExpiryTime < time.Microseconds() { + planStoreForBonusComputation = 0 + } + if familyData != nil { + if familyData.ExpiryTime < time.Microseconds() { + familyData.Storage = 0 + } else { + planStoreForBonusComputation = familyData.Storage + } + } + storageBonus := bonus.GetUsableBonus(planStoreForBonusComputation) + var result = details.UserDetailsResponse{ + Email: user.Email, + FamilyData: familyData, + Subscription: *subscription, + Usage: usage, + StorageBonus: storageBonus, + ProfileData: &ente.ProfileData{ + CanDisableEmailMFA: canDisableEmailMFA, + IsEmailMFAEnabled: *user.IsEmailMFAEnabled, + IsTwoFactorEnabled: *user.IsTwoFactorEnabled, + }, + BonusData: bonus, + } + if fetchMemoryCount { + result.FileCount = &fileCount + // Note: SharedCollectionsCount is deprecated. Returning default value as 0 + result.SharedCollectionsCount = &sharedCollectionCount + } + return result, nil +} diff --git a/server/pkg/controller/user/userauth.go b/server/pkg/controller/user/userauth.go new file mode 100644 index 000000000..bf092cb14 --- /dev/null +++ b/server/pkg/controller/user/userauth.go @@ -0,0 +1,405 @@ +package user + +import ( + "database/sql" + "encoding/base64" + "errors" + "fmt" + "github.com/ente-io/museum/pkg/utils/random" + "strings" + + "github.com/ente-io/museum/pkg/utils/config" + "github.com/ente-io/museum/pkg/utils/network" + "github.com/gin-contrib/requestid" + "github.com/spf13/viper" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/crypto" + emailUtil "github.com/ente-io/museum/pkg/utils/email" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +type HardCodedOTTEmail struct { + Email string + Value string +} + +type HardCodedOTT struct { + Emails []HardCodedOTTEmail + LocalDomainSuffix string + LocalDomainValue string +} + +func ReadHardCodedOTTFromConfig() HardCodedOTT { + emails := make([]HardCodedOTTEmail, 0) + emailsSlice := viper.GetStringSlice("internal.hardcoded-ott.emails") + for _, entry := range emailsSlice { + xs := strings.Split(entry, ",") + if len(xs) == 2 && xs[0] != "" && xs[1] != "" { + emails = append(emails, HardCodedOTTEmail{ + Email: xs[0], + Value: xs[1], + }) + } else { + log.Errorf("Ignoring malformed internal.hardcoded-ott.emails entry %s", entry) + } + } + + localDomainSuffix := "" + localDomainValue := "" + if config.IsLocalEnvironment() { + localDomainSuffix = viper.GetString("internal.hardcoded-ott.local-domain-suffix") + localDomainValue = viper.GetString("internal.hardcoded-ott.local-domain-value") + } + + return HardCodedOTT{ + Emails: emails, + LocalDomainSuffix: localDomainSuffix, + LocalDomainValue: localDomainValue, + } +} + +func hardcodedOTTForEmail(hardCodedOTT HardCodedOTT, email string) string { + for _, entry := range hardCodedOTT.Emails { + if email == entry.Email { + return entry.Value + } + } + + if hardCodedOTT.LocalDomainSuffix != "" && strings.HasSuffix(email, hardCodedOTT.LocalDomainSuffix) { + return hardCodedOTT.LocalDomainValue + } + + return "" +} + +// SendEmailOTT generates and sends an OTT to the provided email address +func (c *UserController) SendEmailOTT(context *gin.Context, email string, client string, purpose string) error { + if purpose == ente.ChangeEmailOTTPurpose 
{
+ _, err := c.UserRepo.GetUserIDWithEmail(email)
+ if err == nil {
+ // email already owned by a user
+ return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+ }
+ if !errors.Is(err, sql.ErrNoRows) {
+ // unknown error, rethrow
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ ott, err := random.GenerateSixDigitOtp()
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ // for hard-coded OTTs, inserting the same OTT into the db again can throw an error
+ hasHardcodedOTT := false
+ if purpose != ente.ChangeEmailOTTPurpose {
+ hardCodedOTT := hardcodedOTTForEmail(c.HardCodedOTT, email)
+ if hardCodedOTT != "" {
+ log.Warn(fmt.Sprintf("returning hardcoded ott for %s", email))
+ hasHardcodedOTT = true
+ ott = hardCodedOTT
+ }
+ }
+ emailHash, err := crypto.GetHash(email, c.HashingKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ // check if the user already has OTTActiveCodeLimit active codes within the OTT validity window
+ otts, _ := c.UserAuthRepo.GetValidOTTs(emailHash, auth.GetApp(context))
+ if len(otts) >= OTTActiveCodeLimit {
+ msg := "Too many ott requests in a short duration"
+ go c.DiscordController.NotifyPotentialAbuse(msg)
+ return stacktrace.Propagate(ente.ErrTooManyBadRequest, msg)
+ }
+
+ err = c.UserAuthRepo.AddOTT(emailHash, auth.GetApp(context), ott, time.Microseconds()+OTTValidityDurationInMicroSeconds)
+ if !hasHardcodedOTT {
+ // the AddOTT error is only checked for non-hardcoded OTTs; for hardcoded
+ // ones, the unique OTT check at the db layer can fail, and that is expected
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ log.Info("Added ott for " + emailHash + ": " + ott)
+ err = emailOTT(context, email, ott, client, purpose)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ } else {
+ log.Info("Added hard coded ott for " + email + " : " + ott)
+ }
+ return nil
+}
+
+// verifyEmailOtt should be deprecated in favor of verifyEmailOttWithSession once clients are updated.
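+//
+// The flow: hash the email, rate-limit on accumulated wrong attempts, compare
+// the submitted code against all active OTTs, and remove the OTT once it
+// matches. A hypothetical call site, for illustration only:
+//
+//	if err := c.verifyEmailOtt(ctx, "user@example.org", "123456"); err != nil {
+//		// e.g. ente.ErrExpiredOTT or ente.ErrIncorrectOTT
+//	}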
+func (c *UserController) verifyEmailOtt(context *gin.Context, email string, ott string) error { + ott = strings.TrimSpace(ott) + emailHash, err := crypto.GetHash(email, c.HashingKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + wrongAttempt, err := c.UserAuthRepo.GetMaxWrongAttempts(emailHash) + if err != nil { + return stacktrace.Propagate(err, "") + } + + if wrongAttempt >= OTTWrongAttemptLimit { + msg := "Too many wrong attempts for ott verification" + go c.DiscordController.NotifyPotentialAbuse(msg) + return stacktrace.Propagate(ente.ErrTooManyBadRequest, "User needs to wait before active ott are expired") + } + + otts, err := c.UserAuthRepo.GetValidOTTs(emailHash, auth.GetApp(context)) + log.Info("Valid otts for " + emailHash + " are " + strings.Join(otts, ",")) + if err != nil { + return stacktrace.Propagate(err, "") + } + if len(otts) < 1 { + return stacktrace.Propagate(ente.ErrExpiredOTT, "") + } + isValidOTT := false + for _, validOTT := range otts { + if ott == validOTT { + isValidOTT = true + } + } + if !isValidOTT { + if err = c.UserAuthRepo.RecordWrongAttemptForActiveOtt(emailHash); err != nil { + log.WithError(err).Warn("Failed to track wrong attempt") + } + return stacktrace.Propagate(ente.ErrIncorrectOTT, "") + } + err = c.UserAuthRepo.RemoveOTT(emailHash, ott) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// VerifyEmail validates that the OTT provided in the request is valid for the +// provided email address and if yes returns the users credentials +func (c *UserController) VerifyEmail(context *gin.Context, request ente.EmailVerificationRequest) (ente.EmailAuthorizationResponse, error) { + email := strings.ToLower(request.Email) + err := c.verifyEmailOtt(context, email, request.OTT) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return c.onVerificationSuccess(context, email, request.Source) +} + +// ChangeEmail validates that the OTT provided in the request is valid for the +// provided email address and if yes updates the user's existing email address +func (c *UserController) ChangeEmail(ctx *gin.Context, request ente.EmailVerificationRequest) error { + email := strings.ToLower(request.Email) + err := c.verifyEmailOtt(ctx, email, request.OTT) + if err != nil { + return stacktrace.Propagate(err, "") + } + _, err = c.UserRepo.GetUserIDWithEmail(email) + if err == nil { + // email already owned by a user + return stacktrace.Propagate(ente.ErrPermissionDenied, "") + } + if !errors.Is(err, sql.ErrNoRows) { + // unknown error, rethrow + return stacktrace.Propagate(err, "") + } + userID := auth.GetUserID(ctx.Request.Header) + user, err := c.UserRepo.Get(userID) + if err != nil { + return stacktrace.Propagate(err, "") + } + oldEmail := user.Email + encryptedEmail, err := crypto.Encrypt(email, c.SecretEncryptionKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + emailHash, err := crypto.GetHash(email, c.HashingKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + err = c.UserRepo.UpdateEmail(userID, encryptedEmail, emailHash) + if err != nil { + return stacktrace.Propagate(err, "") + } + _ = emailUtil.SendTemplatedEmail([]string{user.Email}, "ente", "team@ente.io", + ente.EmailChangedSubject, ente.EmailChangedTemplate, map[string]interface{}{ + "NewEmail": email, + }, nil) + + err = c.BillingController.UpdateBillingEmail(userID, email) + if err != nil { + log.WithError(err). 
+ WithFields(log.Fields{ + "req_id": requestid.Get(ctx), + "user_id": userID, + }).Error("stripe update email failed") + } + + // Unsubscribe the old email, subscribe the new one. + // + // Note that resubscribing the same email after it has been unsubscribed + // once works fine. + // + // See also: Do not block on mailing list errors + go func() { + _ = c.MailingListsController.Unsubscribe(oldEmail) + _ = c.MailingListsController.Subscribe(email) + }() + + return nil +} + +// Logout removes the token from the cache and database. +// known issue: the token may be still cached in other instances till the expiry time (10min), JWTs might remain too +func (c *UserController) Logout(ctx *gin.Context) error { + token := auth.GetToken(ctx) + userID := auth.GetUserID(ctx.Request.Header) + return c.TerminateSession(userID, token) +} + +// GetActiveSessions returns the list of active tokens for userID +func (c *UserController) GetActiveSessions(context *gin.Context, userID int64) ([]ente.Session, error) { + tokens, err := c.UserAuthRepo.GetActiveSessions(userID, auth.GetApp(context)) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return tokens, nil +} + +// TerminateSession removes the token for a user from cache and database +func (c *UserController) TerminateSession(userID int64, token string) error { + c.Cache.Delete(fmt.Sprintf("%s:%s", ente.Photos, token)) + c.Cache.Delete(fmt.Sprintf("%s:%s", ente.Auth, token)) + return stacktrace.Propagate(c.UserAuthRepo.RemoveToken(userID, token), "") +} + +func emailOTT(c *gin.Context, to string, ott string, client string, purpose string) error { + var templateName string + if auth.GetApp(c) == ente.Auth { + templateName = ente.AuthOTTTemplate + } else { + templateName = ente.PhotosOTTTemplate + } + if purpose == ente.ChangeEmailOTTPurpose { + templateName = ente.ChangeEmailOTTTemplate + } + var inlineImages []map[string]interface{} + inlineImage := make(map[string]interface{}) + inlineImage["cid"] = "img-email-verification-header" + inlineImage["mime_type"] = "image/png" + if auth.GetApp(c) == ente.Photos { + inlineImage["content"] = 
"iVBORw0KGgoAAAANSUhEUgAAAMgAAACsCAYAAAA+PePSAAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAABFKSURBVHgB7Z1fjFXVFca/UdQRRbGaVMMQLk1ASR9gGg21bZpBxofGWsCH4oOUmcSXogRN+4CYVqyV0FQDhED6YDND6IP0QUHti4VybVItaZWhPkz5YziEmWhSiMAADqJO93fuOTAz3vlzz9l7n733Wb9k5w5xWrh/vrvWt9baezdBMMbQ0FBFPUxXa4Fa/HlW8p8qycKwP9fjTJ11InnsSR+bmprOQDBCE4TcKCFQBBW12tSaj6uCmA47pII5pFaEmmiqEHIjAslAIgiKYCmuCsKWGBqhippodkMiTSZEIJMkSZcoiCWoRQofqaq1R63dSiwRhAkRgYyDEgUjAwXRgbF9gq9EqIlls4hlbEQgo0jSpw74HSkahf5li1pVEctIRCAJShht6mENaqJw0U/YolutHWLya5ReIEoYHephJcoTLSZLHFWUULpRYkopkCSNYrToQHjeQjeRWs+XVSilE4gSB4WxHuVOo7IQoYRCKY1AlDBYot0EiRh5iVAioQQvkMR8PwfxGLqJ1FoUetXrGgQKfYZajBj7IeIwQUWt4+o17kqaqEESZARJKlMUh/gMO0QINO0KSiDJN1kXJGIURTdqQokQCMEIJDHhFIdEjWKJEFA08d6DDPMar0PE4QIVtehLNiX9Jq/xOoIkKRVNeAWCi0TwvNLlbQRR4uB4yEGIOFymotbBpGjiJV4KRL3g7Gt0Q1IqH+B71JW8Z97hVYqV5LT0Gx0QfIQ7Gzt92tnojUASv8EXeD4Core3N179/f3xn6dNm4aFCxdi3rx5CJQIHvkSLwQSmhk/d+4cduzYge7ubgwMDNT9nRkzZmD16tV45JFHECARPBGJ8wIJTRx9fX1YsWLFlYgxERTKzp070dLSgsCI4IFInBZIaOJgKvXYY4+NGTXG4pZbbolFEmDaFcFxkThbxQpNHK+99hqWLFnSsDgIUzL+b/n/ERgVtfa7POzoZAQJTRz0Ghs2bIAO6Eu4AiOCo5HEOYGEJo6tW7fGSyciEns4JZDQxPHiiy/G1SoTiEjs4IxAkiZgEKMj9AxMqUx7BpaA161bF5v4gOBpKotcaSa6JBBO4y6F51AcLOOyYmUDVrZY4QpMJN1KIJ1wACeqWMmcjvfiYI+D1SZb4iD8u/h38u8OiA5XZrcKjyDJpGcXPKfRBqBuAm0odqhIYsbETZJCBZKYcvoOr6dyszYAdUORbN++PaSGIn1Ia5GmvbAUKzHlrFh5LY69e/c6IQ7C6MUoxn9TIMSfkSJ3JhbpQZhjVuAxbACuWrXKCXGksEjAf5Op8nIBVFD7rBRCISlWCL7DRANQN4H1SpapVGs3LGNdICE0A30QR0pAIinEjxQhkG7UrhvwkrVr13o3NMiG4saNGxEAvOBnESxiVSA+p1bM7Z944gkcOHAAPsJditu2bQuhodhp88wtawLxObVij4PisNkANEEgvRKmWrNtjaLYrGKth6fisDk6YpK0DOx5150lX2tVLSsRJIkex+EZFAVLpkV1x00RSCRZZOMeRVsRZD88g16DDcDQxEH4nJYuXep7VLQSRYwLJDHmFXgEq1RMRVxqAOomgG28bcmB5UYxnmKpJ8HUqgJP8KnHoQuPeyWRSrNmwyBGI4hv0aOM4iAeP++K+ow9BYMYjSA+RQ+T22N9oaOjI96h6BlGy77GIogv0YO5OLvjZRcH4fAl+z18TTyCZV9jUcRYBPEhetjeHusL3E/CrrtHZWBjXsRIBPEhehSxPdYX+Jp41lCsmLqDxFSKtQYOU/T2WB/wsOtuZABWu0CUktvUwwI4SnrIgYhjYlKReBJl25LPnlZMRJAOOAqbYq5sj/UFz7bxas9ctJp0l2eudJ6PW1aeffZZrFzp9FYe7SVf3RGkDQ7CJpiIIz/sFTneUGTJtwMa0S0Q58x5WbvjpvDg9VwCjWhLsVxMr3zcHusLjm/j5d71HmhAZwQxOhPTCGkDUMRhjvRCIEe77tqmfHVGECc656Fsj/UFRzdfaeusaxGIK+mV6QYgPwxMKziK4ePhByzV0mjrfn0cFclsHUcE6UqxCj+ZPR2PMCmOPXv2xKeD+HoySHt7e/wc+Fx04mjXXctnUpdAtFYOGsXG9lj2AEK4g4PPwYS5TrfxOnQskpbPZO4UKzlY+FMUBM0iq1WmOXLkCEKBxvree++FKShAVrkc4La8TUMdEaSwuSvW422IIzRMR0K+J470SnJ/NnUIpBD/IQ1At3Hk/cn92ZyC/MyHRQa+uIg/vP0ndL29E01Tp2Do4hcQ3IQCuTh9CN/90Q9xV/PtmHvzTFgm92dThwcZgiV29e/DK9FbsUhSPnsjwmdvmq8wh+RByNy5c2GSa2fejKnL5+C6u6/efUORbF/wC9x1w+2wRO7hxVwplon5+7H4yyfvYdOxP48QB7nxJxVM33h//IYIxcOoTmHc+uv7RoiDfDx4Gqt6XoZF+A+oIAd5PYg1g76rf+z9CNfc3hy/ITd1zot/FoqBgrj1V/ehuX3shiFF8sEZq9G4DTnI60EqsMSR8xM3oW743p3xm/TZG8dx6d1PINiBX0r8chodMcaCAvnOdLMp3jBy+ZC8ArFq0CdD+mY1t8/EwLYP8dXpQQjmaF7cotLc2XFq5Si5spy8z8rZvef0JPQmtkx82WC0oNfwwPtVkIPMAkk66M5f4UwTf8P375S0SxOMFDc+PHtcn+EY0zlMm3VwMU8EcTZ6jCZNu6bcfVssFEm7skGPx6jhcDo1Fpm/yPM8U+ejx2hSEz/415MY3Of1LUtWadSEOwi/zDPtMCxFBBkO3+ypj85B84Mzce73ByWajAMjRfPimXE65WHUGE4FGSlVBBkOhUITP7i3T62TIpRRMFrc1KH6SncE0VeahYzkEUgFAcBvx+tb7xATn5B2wpmOCvkEcisCQUx8DQ96GlmpICOlTbHqwW9NrrL1TuoNFgZGBRkRgdQh7Z2wE//lyfMIFQ97GtYRgYxBOgBJX8K0KzQCM+HGEIFMQNo74bj9Q3feD9/hNO0Lh7sx7ZetKBEVZMT4PekhwGjCD9XP3v8tPr50Gr7CDWd8DpbHzb0muHKFSY6cP4ll/1yHxys/xuOzHoYvUBCbPtqFo5PYMiCMRASSAW77ZcpFkbicdnH35R9PvIVX+/ZByIYIJCNpLv/B2cMqojxsc5/1pKCAN3/09S3KQmOIQHLCDyJTmOUz2vFoywMomivCFZ+hhTwmPYIQww/lZpXjLzuwrjATz0jxyok3xYTXJ/OpJhJBNEKh0MQ/2rIYy9WylXZREIwa/PuFuohAXIKm+J1TPcZNPKMGfQbTPGFcChFIhEAmek1g2sTXO0RPGJNCBHICwoTw2z0uCWvqnRy90IdNx3aJz2iMs8iIpFiWSHsnv/v2zzOdUSs9jVxEyEjeFEtoAKZdrDLRlzSSdokJz01hHkTIQN
o7mcjES09DG5mvhM7TB9FyD3VZST/8qw69XLd3IoOFWinkdPdcV1sJNSgA9k7Y5CMUzor3X6h7kr2Qmcxf5plTLJ5UNzQ0RJGUYl+IadKS7d9PHRKvoZczee4HyVvFiuDp+VgusqvvbxC0k8sK5N0wdQiC4Da5PqN5BSJGXXCdCDnIK5AqBG3IhaRGKDTFiiDVLC18eXoQZ3/zL7nLRDPKoFeRg1wmndUBVcmiQtsgZObSvj5cfON4HEF4aN1Xpwbj86rkSJ7cVJETHbNYNEFtEBqGUeNiVy8uHx4ZhHkW1+c9p+ID7Hi6upCZ3EUkHcf+7IbQEHGkUKnU2bXvfU0cw3/n4qvHcEb9DiOKkIncn00dEUQqWQ1w+fCnuND130kfkM3fO/PMe/EBdpJ2NUzuz2ZugSQ+pApJs8aFEeFCdy8+P3gKWWDaxWjD09flaoJJUc3TQU/RtR9kD0QgYzLchOeB0eQCPUvP/zD1p3MkmozPHmhAl0CY622CMAKmU4NvRmP6jKwwCnHRxDPtEuqixRtrEUgyuBhB9qjHpCac17uZhCXhS//4BDd13oPr7r4NwhWirNc+j0bn4dU7IMRRgw0/0+JIYdo18FJPnHpJtesKWtIronNPOkPacygpY/U0bCEmfgSboQltEUSFNJbUqighTKfOqahRlDhSUhPPCFbiaNKjK70iuu8H0RbafCBNp+gFXBo05LVx7J2UdK5rCzSiWyDdKMHwYtzl3nU0zv1dvsOQwmUnvmTXW1ehEa3nYiVNw3fUj0sQKJfe/ViJ45g3o+lp2vWFinYl6MR360yviImD42iQghNI0SY8LyUZgNReSdV+R2Eyf19FIKQ9DRdMeF4CH4Dsybv3ox6mjh6lktvgOY0OFvpCOgDJ+9EZTQJJu7Sa8xQjt9wqJXfD45MX+U17fvuHsQkPTRzDYTPz3EsHQzDxUfKZ047Ja6C97KxzsJDfrlmnbn0jNfGed+KfhyFMnu5Os74Gnhwsx3ItS7e++4ysMIpweTgAaSx6EGMCSUq+VLbTU762Bgt9wcMBSGPRg5hMsSgSRpEIjmJ7sNAXPBqANBo9iI0LdJ5W63U4hO89DVt4MABpNHoQoxGEKIVzyrcKR6AJD6GnYQuHByCNRw9iXCAJxpU+EWk6deHVo3KCYQYcHIBcBAtYEUjS4TTSyJkIXwYLfUHHAGR/fz9yon3maixsXuK5Xq2VsFj29W2w0BfyDkD29ecqikSwmJFYE0hS9qVh74JhxITbITXxzQ+22ByAfN5W9CC2PEhMYqqqyMC0KVMn/J2QBgt9gdGk0QHIe2bNQUa6bRjz4VgVSEInMmyqeuib94/7313d3VcW0gFI+r2JhNJ6/beQgQgFFHusCyQJj51okOUti3FX89fvFY9PLFTpVOiDhb4w0QDkNz64jAdaf4AMWE2tUppQEMqPpLNak+bfH/0Hj+94Bte23ISmqVNwuedU/IZIxHATNhevW3BHbOIZVdiD2rJmA9rb29EgW5Q4nkIBFCkQVrMOosHD5rZu3RovwT9Wr14drwaJ1GrVcc5uFgoTCFEiqaAmkoZKvwcOHMDatWt11NMFCyxcuBBPPvlk/NggFEVrEalVSqECIUokHchY+u3t7Y3FMjAwAME9ZsyYgXnz5sUrI522q1ajKVwgRIlkPUp8KqNQF5ry9SgYJwRClEi6Ueu0C8JuJY5lcACXBEIfsl+tBRDKTIQCTfloimgU1iV5QfitEUEoK5Fai1wRB3EmgqQklS1GkgqEMhGhJo4IDuGcQIiIpHREcFAcxEmBEBFJaYjgqDiIswIhIpLgieCwOIjTAiEikmCJ4Lg4iPMCISKS4IjggTiIM2Xe8UheSG7SjyD4ziF4Ig7ihUBI8oK2QtP910IhxKf++yIO4o1ACBtIyQhC4ccICQ3D2aoOl5qAk8ELD1KPZAqY5/56cTh2iaEgni56Kjcr3gqEiHl3ngge+Y16eJVijWaYLynkUDphXPietPosDuJ1BBlOknJxT0kFQpEwpepMzmT2nmAEQpKUaz1kX0lRVFETR4RA8DrFGg3fGFZKUDtWKIJgizRqeO036hGUQFKSigkbi17ek+gZ9Bqzfa1STURQKVY9pNJljCpqvY0qAibICDKcJO3irZSSdukhUmtZkk5VETjBCySFKYAIJRdpw292KBWqyRB8ijUWUhaeNBFqXm6zb2MiOiitQFISofCMYDlNZSRVtXaEar4nS+kFkqKE0qYeOlDuHgojRBW1w6KrEEQgo0mqXm0oV1SpqrUHtQtq5OahYYhAxiERC4/dX4LwvEqEmrfgKYY9EOoiApkkiViWoiaWNvhJFbVIsTu0jrcpRCAZSI5JZfpFwcyHm4JhqsTIwC2uLMv2SPrUOCIQTSQmn6KpoCYa/mxrMxc/+BGuCqLKP4sg8iMCMciwSDP8cVbyOHrVIxr1c/rnE7gqiDOSLpnj/8EQWj7GK3LoAAAAAElFTkSuQmCC" + } else { + inlineImage["content"] = 
"iVBORw0KGgoAAAANSUhEUgAAALAAAACwCAYAAACvt+ReAAAACXBIWXMAAAsTAAALEwEAmpwYAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAABHlSURBVHgB7Z1tjFxV/ce/U7ZGxa3bEsrflr+d5o+x5W9tN/ZJAu60PFi1ursm9SnSbuWFwZIAMTFAom6jKdEX0iYg+AK7SzRqfNHWmABa2qmokW61iwq7QA23L7oiaLvsQhG3MJ7vnXOXu7t3Zu7MfTrn3PNpTnd3Zpru7nznN9/fwzm3AEtoKpVKUXzwr2VidchVlA/zvg5ivMY6LZbjrUKh4MASigIscxBCpQDXyLVafiyitjCTYBhVQR+Tnw8LYY/DMgMrYEwLtgdVoXbjrWiqGsNyHUJV0A5yTm4FLERbQlW0XagKV0coZkbog0LMZeSQXAnYJ9odSNcOpIEjVlmswTyJ2XgBGy7aWjioinmfEPMwDMZIAUtP24eqny0h31DAFPIADMQoAUvh3irWbchPtA2Lg2pU3m1S8meEgGV9th9Vm2BpzAAMEbLWApbC3Q9rE1plAJoLeR40hMIVa0B8+jyseKPQJ9bz4ne5XwYD7dAqAluPmzj9qCZ82nT8tBGwEC9LYfdA3S6ZKTio2ooBaIDyArY+NzMGoIE/VtoDC/HSLpyEFW8W9Il1UjwH34TCKBmBbdRVjrJYO1WMxspFYBt1laSEajTug2IoI2BWGMRikrYXtsKgInxOWG67R1aDlEAJCyEtw1HYCoMuOGJtUsFSZB6BZXmMlqEIiy4UUbUUPciYTAUsM9wDsJZBR/icHci6SpGZhZB+9zZYTGCvsBO3IwNSF7BMABh1S7CYxEFUS22ptqFTFbBM1viDrobBjIyM4Pjx4zhz5oz79dKlS7F+/XqsXLkShsPh+d40k7vUBJyHSgNFe++997ofg6CQb7nlFvT29sJgHKRYoUhFwKaLd2JiAnfddRcOHz4c6vEUMIVMQRuKg5REnLiATRfv6Ogodu3aNW0XwkLxPvTQQ1bEEUm0jGa6eGkVbrzxxqbFS/hvtm/f7vplQymKdTTpQfnEBCyrDUzYijCQwcFBV4CTk5NoFYqYduLAgQMwlCKqteLE6vyJWQjxTfNZybxTkwRM1LjihJ6Yy1DKwkpsQgIkEoFlk8JI8e7Zsyd28ZIkXhQKUZKaiJ3YI7BsLfbDMFhpuPvuuxN/u6ctYUXDUPpFJN6NGIlVwHK4wzhDR/Hu2LEjtYTruuuucyP9ggULYCBsdBxETMQmYJltcqrMqMEcr1rQSqUhCuza0VIYWGZjq7kzrvJaLAKWWaZxI5FZidfD4Fqxg6qII89NxJXE0fcWYRBsUPT09GQmXpL1CyhBiqhqJjKRBSz3SRk1FvnYY4+5DYooNd648GrFBjY8bhPaiaybSBbCRN/LKsOdd94JFWEVxLBBoMh+OKqA2SYuwRB0qMUa2PCI1ORo2ULIem8JhqBLI8HAhkcpipVoKQKbZh1Yc2W2rxOGReKWrUSrAh6AIYdJ0+/qOkxDP0xfbAgtWYmmBSyrDvuhOWl315KCDQ9OxhnStWu6S9eUgE1pWLA0xbdfU0pTBjU8HDTZ4Gg2ieO5ZUVojImD5AY1PIposqcQOgLLxO15aIzBnS0XQyJxUwldMxG4HxrD1rDJ4iVe167WrmhNoE0N3WYOFYF1j758QrnxUoXWcFoY0LVbHiYKh43A/dAUlsii7l3TEZYHNW94hKp0NYzAOkdflpcMqpO2hOYNj4ZROEwE7oeGMPrkXbxE89ZzQy9cNwLrGn0N3yDZEhp37RbWqws3isD90Iykdg3rDnMBipgdSM2oWxduFIEZfYvQBJ3nGtJCw1oxo+/yWlG4ZgSWMw9FaACjiuEn3MSGhs0c1oX7at1Zz0LcCg3gE2HCUE6aaCji7lp3BFoIEX3XoDq0ozSmt4aThhNsLDVqcvA2T7osz76xVgRWfpOmFW90NLNegUeV1YrASidvVrzxo0HDY1xE4IWzb5wTgYV4S1BYvCqc12AiGtTOO6Q2ZxBkIfqgKBSvKuc1mIgGIp5jI+ZYCFXtA30amxRWvMmjcNdujo2YIWAZoo9CMZI8bKS9vR3bd2x3nzTdBsH5jsTy4X333he7peJlwRiNFdxrN6MaMVvAe6FY/TfJtzUKdvChQe33krmD7D29sb87KXpC5j4h4Okq2WwP3AWFSNqT7bpllxEnP/JnYDMnbhjdFaz2zNDotIDl5NkaKEIaCcWKFStgCj29yVzRQcGS5Rr/RWP8EThX4iUmXfo1yXcSBXdyT79a5wXdmCV2HFJNvJkTHj2rANPB1i/gTC/APTVxwRWubmeU5Qm2nu+44w48dWQELw2ddZ+zjJge7nGrENJTnENGnD/zGo7d9IT7Kn/lQ2fxsyd+gnOVs0iakVGzJthWrkjWEn1kQxd6Orbh/NGp6duWbL4Ma762Eu9c8g6kjLtTwxNwCRnWfw9v+y3Gn5lZAjr5xnEcefPRRIVsBRwO1spv+sCX8Z5ni5ianJpz/6VrF6HrhxuQMm49uE1+kVkC9+R3R+aIl3RetN5daQjZUpvuDb24+vy1eP2PU+CfIF46IezE5AXMb29DilCz0wIuIQPGn5nAcz9y6j6GIl4+7woceeNR/OlNrU+c0YrFCy7Dje/9EhaeXIzXawjXz0tD/3LtRIq4OZsn4GXIgKfvPxXqcR2FRfh02+exufJRPHjhPhuNE+ZjV27Fxn904aJnw0fU/0ymntC5riFTC/HyaHM7ZCnkr87/urUVCfH/l6/CDee34pJTi6EBRf7VJjtwWmFtRby8vfAObOnYig+/1oWpVxrbBUXgfHCREbgIDbG2Ih6uvrwL17+2FRdNttVM0hRGXwF7WFvxFix3hZ1IY5LGAHD5i5mkP3Ghv4A9rK2o7jJuJGCK/Kq2LmyetyWwpqsZ5giYeLai613XYuDcD6ytmAXtwsfbevDmGHS0C0Eso4CNuUysxyWvLHZtxdDFv8eRC48ERiVDruoTis6Vndh80RYsfHYx3oRRLKSA342M+M8rydYO1716Fd5XWYEj8+baCr6VmgZHKv1zu/wZt733C3j/C6tMsAtBvDvTCDw1kfwv1bMVH7y4E4fGf54LW0Hhdm/sxeoX1uP1Z6ZMsQtBFI20EEFc8eoKfGPZHjwy/ks8KpaJMAJvve6Twi581J0Ye91c4U6TGwGTV8dewzW4Fqvmd+JfG8ZgEgXx59NXbsM/fzzh/pw5oSNXAvagrej45SKM/O8pXPHFYtpTVLHDeeoTX/+LOxWWMzr0fuYiwmGi04fOYOXNV2BZt367kxl1n77/OTz3o9OmJmkNybWACd9uveh1pRByBjsLWuKfQ2fd7ztHdiGQ3AvYg5GYolA9Gl+YuICnHzjVcI46L1gB+/CiMcW89turlIvGp4RVoO3Jq10Iotmr1ecC2omHtxzDSMiB+6RhkvabLx13t19Z8c6EAh6HJRBGu0eEkJsdvI8Lblvni+jhjx3LY4UhDOO0EBRw7kppYaGtOPyZ37u+OM0kzy
ZpoRi3HjgkaSV5jLonvvFnjB15EZaGuAJ2YNBIZZIkXXKzSVrTuAJ+GZamYDTmoogZkaPCczH+/J0R63Ob52WbxEXAS/LOt+hTvSTtsW2/s+JtjXOehbC0CG0FS27NJnk2SYuF01bAMRE2ybNJWqw4VsAx4iV5Y0f/gdUBJzbaJC12rICTgNGVy0vybJKWGE5boVBwKpWKbWYkgDeuaX1uMlC73iyEA0siWPEmxjD/8gT8JCwWvTjNvzwBD8Ni0Ysy/7ICtujKDAthBZwA4ziHn1YGxS/3BCyx42rWnUbj1V5EJcKBHeqJjXLl1/gDfoN/iz+jlb+6Iu4pfFaUehbCEplhapaf+HdkHIIlMg7+hgcq9wiD9itXvP7b91b2uMK2RGbaMbQF3WhpHoq1XPmViLqP130chT1cGcKWwqewAh+ApSWmg61fwAfF2g9L09AePFI5NCPi1sPzxmuwFqXCDdZWNM/cCCx9MO9Q5qLfqkMhHqz8zLUHrUDhO5W/uSKmmC2hoP91vC9mbyk6BivghjDS/qHy+HSSFgXvRUBr0Ve42UbjxhzzfzF7W/1BWOpSTdK+NydJiwqF7CV5/4ZtP9dhhkZnRGBee9YO9gRDsdLnJl3T9ZI8aysCcahR/w1Bu5IHxboVlmloF+KOuPXwe2ub5M2gPPuGIAEzRFsBC17AmIi6v2g5SYuKl+RtLHwEG3E1LG5wncEcAVsb8VaSxqibNYzGtC5MGHOe5M2xD6TW2WiDyCn+JE0lvCSP7wj8PIeUg26sdTJP7myEF+lG8RRUhp0+zlbkMMnbF3RjoICljSiLT0vIAWknaVHJYZLH5kXgqEO9s9HYby7BYCgA1l2zStKiwiRvuHJCPEk3CCFfD4PZV+uOeucDD8DQU3uqNd1fYKDygLbi9cN3D/rjF/B3GAiTt4Fad9YUsJy3rKl8XaHHZZLWaGpMN2gr+HPRWhiW5JXr3dnoeNW9Yn0TBhB18EYXDBwQ2l3vzrqXGJBRWPuSGn0uo5Pp4vXwXqwcrNc8Gg/4J8+CCHONjH5oyvQuCI0qDHHCTqLmu0B2N3pAwxPa5ck9ZSRQkZi/YH4iF/wOuzsiL2g6INQw+pKwlxjYKdbziJm3vastdgHrVtNNCw1rxw2jLwklYBmF6YV3QFGyHrzRBSZ5o5WnsBHXxFo7vjjeyy2Eir6kmYu89IvVDcWGfFQavNEFDsx7tuJzhZ34H7wHihEq+pLQFzqUrwil6sKqDt7oQpy1444VCxATu8NGX9LsZbZYF6aNKCJD8lLTTYuoteP57fPFiuWKbQ6qGgtNU/+r3Ll8u/j0ADLCf+KNJT78m0t5glAR/xf633a8vx0xsds7cScsTV8rWfwHHLUsIwaWbL4s9GNrnXhjiRcKmTMizdiKS9ddghgYqDfzUIsCWkBE4aL4cBIRE7qXhs7i2E1P1H2MrelmS5jDV7oe3CBEvAgRYNTtbMb7erRkXGRZjZniPYgAf+hL1y4KvHZEnGcvWFrHG9mkkD++tBtvG3v7jPtZPosoXrKvFfGSliKwhxDxUUTs0M2Owm+0X8DjE0etcBXlqqXX4BNLujF1ouJ+vaz7cqz71ipEoCzEuwktElXARcRgJR7eUsaLS/6Onw/91FYWNOH6dTe4g/RX3bw+SgRu2Tp4NJ3E+ZH/ceiicy0++OD73OTMilcfLl7yTqz99qqo9uH2KOIlkSKwh4jErN1F3gR6ZuwM7v/+/Tg+dBxjY2OwqMe6devwlZu/4n6MCH3vbYhIXAKmhaCVKCImhoaGMPrMKCYnJ2HJhvb2dnd5bN68GQvaY+m4Oahah8hb1mIRMInLD1uMJ7Lv9RPJA/uR39BOWCz12RmXeElsAiaySxc5qbMYy26pkdiIzUL4iSupsxhFLEnbbBIRMImjyWExhoNCvL1IgCQFzGSOIraXLMg3vA53KY6KQxCxemA/8hvmq86BJa84YvUkJV6SWAT2kOU1RuIiLHnCEWtTnBWHIBIXMLEizh0OUhAvSUXAxIo4NzhISbwkMQ88G/kDcWzOXtLWXJiwpSZekpqAiU/E9np05lFGtdrgIEVSFTBhRiprgsYd3Zpj2KTYlGS1oRapC9hDdmVs21l/difRYQtLaklcLURy1yM+7IedYtMNRtudcc82NEvmAia2QqEdDlJO1mqRmYXwI38RnbC+WAf4HHWqIF6iRAT2I6JxH6rb9a2lUAtaBvrdpo5+ShrlBEykpaAvLsGiAmXEPIgeF0pYiNnwFyXPCmCVwshLfWkCf/e3yxKZAwVRMgL7kdG4Hwofrm0oZSgadf0oGYH9yGjch+p+OweWpHHE6lU56vpRPgL7kUPyLJobce06xfAubLk3i45aq2glYA9rK2JnAE2ejK4KWgrYwwo5MmVo4HProbwHrofPHy+HAVcUTZEBsZbr4nProXUEno0vInfBtqVno6XHbYRRAvYjO3o8myLvu6LLYh1C9Qh/42rqxgrYQwiZAmblIk9R2btIO89jKMNgjBewHyHmkvjQBzPFnBvR+smVgP1IMXMWmWLW1WZwf+Ex5Ey0fnIrYD8y+aOIKejVUFfQDqp+lsI9aKKnbRYr4ABkx2+NXJ7dSFPUFKaDqlCflB+HrWDnYgXcBDJS+9cyVOeWg1YQ43hrus7xfX1afu0u3WuzafJf05durhLhbZAAAAAASUVORK5CYII=" + } + inlineImages = append(inlineImages, inlineImage) + err := emailUtil.SendTemplatedEmail([]string{to}, "ente", "verification@ente.io", + ente.OTTEmailSubject, templateName, map[string]interface{}{ + "VerificationCode": ott, + }, inlineImages) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +// onVerificationSuccess is called when the user has successfully verified their email address. +// source indicates where the user came from. It can be nil. 
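+// The order of checks below: unknown emails get a fresh user created first;
+// then passkeys take precedence over TOTP two factor; only when neither
+// second factor applies is a token issued, encrypted with the user's public
+// key when key attributes exist and returned as a plain token otherwise.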
+func (c *UserController) onVerificationSuccess(context *gin.Context, email string, source *string) (ente.EmailAuthorizationResponse, error) { + isTwoFactorEnabled := false + + userID, err := c.UserRepo.GetUserIDWithEmail(email) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + userID, _, err = c.createUser(email, source) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + } else { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + } else { + isTwoFactorEnabled, err = c.UserRepo.IsTwoFactorEnabled(userID) + if err != nil { + return ente.EmailAuthorizationResponse{}, err + } + } + hasPasskeys, err := c.UserRepo.HasPasskeys(userID) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + + // if the user has passkeys, we will prioritize that over secret TOTP + if hasPasskeys { + passKeySessionID, err := auth.GenerateURLSafeRandomString(PassKeySessionIDLength) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + err = c.PasskeyRepo.AddPasskeyTwoFactorSession(userID, passKeySessionID, time.Microseconds()+TwoFactorValidityDurationInMicroSeconds) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return ente.EmailAuthorizationResponse{ID: userID, PasskeySessionID: passKeySessionID}, nil + } else { + if isTwoFactorEnabled { + twoFactorSessionID, err := auth.GenerateURLSafeRandomString(TwoFactorSessionIDLength) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + err = c.TwoFactorRepo.AddTwoFactorSession(userID, twoFactorSessionID, time.Microseconds()+TwoFactorValidityDurationInMicroSeconds) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return ente.EmailAuthorizationResponse{ID: userID, TwoFactorSessionID: twoFactorSessionID}, nil + } + + } + + token, err := auth.GenerateURLSafeRandomString(TokenLength) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + keyAttributes, err := c.UserRepo.GetKeyAttributes(userID) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + err = c.UserAuthRepo.AddToken(userID, auth.GetApp(context), token, + network.GetClientIP(context), context.Request.UserAgent()) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return ente.EmailAuthorizationResponse{ID: userID, Token: token}, nil + } else { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + } + encryptedToken, err := crypto.GetEncryptedToken(token, keyAttributes.PublicKey) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + err = c.UserAuthRepo.AddToken(userID, auth.GetApp(context), token, + network.GetClientIP(context), context.Request.UserAgent()) + if err != nil { + return ente.EmailAuthorizationResponse{}, stacktrace.Propagate(err, "") + } + return ente.EmailAuthorizationResponse{ + ID: userID, + KeyAttributes: &keyAttributes, + EncryptedToken: encryptedToken, + }, nil + +} + +func convertStringToBytes(s string) []byte { + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + log.Fatal(err) + } + return b +} + +func convertBytesToString(b []byte) string { + return base64.StdEncoding.EncodeToString(b) +} diff --git a/server/pkg/controller/usercache/controller.go b/server/pkg/controller/usercache/controller.go new file mode 100644 
index 000000000..b6645653c
--- /dev/null
+++ b/server/pkg/controller/usercache/controller.go
@@ -0,0 +1,40 @@
+package usercache
+
+import (
+	"context"
+	"github.com/ente-io/museum/ente/cache"
+	bonus "github.com/ente-io/museum/ente/storagebonus"
+	"github.com/ente-io/museum/pkg/repo"
+	"github.com/ente-io/museum/pkg/repo/storagebonus"
+	"github.com/ente-io/stacktrace"
+)
+
+// Controller is the controller for the user data cache.
+// It contains all the repositories that are used by the controller.
+// Avoid adding any direct dependencies on other controllers.
+type Controller struct {
+	FileRepo       *repo.FileRepository
+	StoreBonusRepo *storagebonus.Repository
+	UserCache      *cache.UserCache
+}
+
+func (c *Controller) GetActiveStorageBonus(ctx context.Context, userID int64) (*bonus.ActiveStorageBonus, error) {
+	// Check if the value is present in the cache
+	if bonus, ok := c.UserCache.GetBonus(userID); ok {
+		// Cache hit; refresh the cached value asynchronously
+		go func() {
+			_, _ = c.getAndCacheActiveStorageBonus(ctx, userID)
+		}()
+		return bonus, nil
+	}
+	return c.getAndCacheActiveStorageBonus(ctx, userID)
+}
+
+func (c *Controller) getAndCacheActiveStorageBonus(ctx context.Context, userID int64) (*bonus.ActiveStorageBonus, error) {
+	bonus, err := c.StoreBonusRepo.GetActiveStorageBonuses(ctx, userID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	c.UserCache.SetBonus(userID, bonus)
+	return bonus, nil
+}
diff --git a/server/pkg/controller/usercache/count.go b/server/pkg/controller/usercache/count.go
new file mode 100644
index 000000000..a0f3bb043
--- /dev/null
+++ b/server/pkg/controller/usercache/count.go
@@ -0,0 +1,27 @@
+package usercache
+
+import (
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/stacktrace"
+)
+
+func (c *Controller) GetUserFileCountWithCache(userID int64, app ente.App) (int64, error) {
+	// Check if the value is present in the cache
+	if count, ok := c.UserCache.GetFileCount(userID, app); ok {
+		// Cache hit; refresh the cached value asynchronously
+		go func() {
+			_, _ = c.getUserCountAndUpdateCache(userID, app)
+		}()
+		return count, nil
+	}
+	return c.getUserCountAndUpdateCache(userID, app)
+}
+
+func (c *Controller) getUserCountAndUpdateCache(userID int64, app ente.App) (int64, error) {
+	count, err := c.FileRepo.GetFileCountForUser(userID, app)
+	if err != nil {
+		return 0, stacktrace.Propagate(err, "")
+	}
+	c.UserCache.SetFileCount(userID, count, app)
+	return count, nil
+}
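Both helpers above follow the same read-through, stale-while-revalidate pattern: serve the cached value immediately and refresh it in the background, falling back to a synchronous load only on a miss. A minimal, generic sketch of the pattern; the `swrCache` type and its locking are illustrative and not part of museum (cache.UserCache plays this role there):

```go
package main

import (
	"fmt"
	"sync"
)

// loadFunc abstracts the slow source of truth (e.g. a DB query).
type loadFunc func(key int64) (int64, error)

// swrCache is an illustrative stale-while-revalidate cache.
type swrCache struct {
	mu     sync.Mutex
	values map[int64]int64
	load   loadFunc
}

func newSWRCache(load loadFunc) *swrCache {
	return &swrCache{values: map[int64]int64{}, load: load}
}

// Get returns the cached value if present, refreshing it in the
// background, and falls back to a synchronous load on a miss.
func (c *swrCache) Get(key int64) (int64, error) {
	c.mu.Lock()
	v, ok := c.values[key]
	c.mu.Unlock()
	if ok {
		go func() { _, _ = c.refresh(key) }() // async refresh; errors ignored
		return v, nil
	}
	return c.refresh(key)
}

func (c *swrCache) refresh(key int64) (int64, error) {
	v, err := c.load(key)
	if err != nil {
		return 0, err
	}
	c.mu.Lock()
	c.values[key] = v
	c.mu.Unlock()
	return v, nil
}

func main() {
	c := newSWRCache(func(key int64) (int64, error) { return key * 2, nil })
	v, _ := c.Get(21) // miss: loads synchronously
	fmt.Println(v)    // 42
}
```

The trade-off is the usual one for this pattern: after the first hit, reads never wait on the database, at the cost of occasionally serving a slightly stale value and issuing one extra query per hit.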
diff --git a/server/pkg/controller/userentity/controller.go b/server/pkg/controller/userentity/controller.go
new file mode 100644
index 000000000..827d8708a
--- /dev/null
+++ b/server/pkg/controller/userentity/controller.go
@@ -0,0 +1,64 @@
+package userentity
+
+import (
+	model "github.com/ente-io/museum/ente/userentity"
+	"github.com/ente-io/museum/pkg/repo/userentity"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/stacktrace"
+	"github.com/google/uuid"
+
+	"github.com/gin-gonic/gin"
+)
+
+// Controller is the interface for exposing business logic related to
+// user entities
+type Controller struct {
+	Repo *userentity.Repository
+}
+
+// CreateKey stores an entity key for the given type
+func (c *Controller) CreateKey(ctx *gin.Context, req model.EntityKeyRequest) error {
+	userID := auth.GetUserID(ctx.Request.Header)
+	return c.Repo.CreateKey(ctx, userID, req)
+}
+
+// GetKey returns the entity key for the given type
+func (c *Controller) GetKey(ctx *gin.Context, req model.GetEntityKeyRequest) (*model.EntityKey, error) {
+	userID := auth.GetUserID(ctx.Request.Header)
+	res, err := c.Repo.GetKey(ctx, userID, req.Type)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return &res, nil
+}
+
+// CreateEntity stores entity data for the given type
+func (c *Controller) CreateEntity(ctx *gin.Context, req model.EntityDataRequest) (*model.EntityData, error) {
+	userID := auth.GetUserID(ctx.Request.Header)
+	id, err := c.Repo.Create(ctx, userID, req)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to createEntity")
+	}
+	return c.Repo.Get(ctx, userID, id)
+}
+
+// UpdateEntity updates the entity data for the given ID
+func (c *Controller) UpdateEntity(ctx *gin.Context, req model.UpdateEntityDataRequest) (*model.EntityData, error) {
+	userID := auth.GetUserID(ctx.Request.Header)
+	err := c.Repo.Update(ctx, userID, req)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to updateEntity")
+	}
+	return c.Repo.Get(ctx, userID, req.ID)
+}
+
+// Delete removes the entity with the given ID
+func (c *Controller) Delete(ctx *gin.Context, entityID uuid.UUID) (bool, error) {
+	userID := auth.GetUserID(ctx.Request.Header)
+	return c.Repo.Delete(ctx, userID, entityID)
+}
+
+// GetDiff returns the diff of EntityData for the given type
+func (c *Controller) GetDiff(ctx *gin.Context, req model.GetEntityDiffRequest) ([]model.EntityData, error) {
+	userID := auth.GetUserID(ctx.Request.Header)
+	return c.Repo.GetDiff(ctx, userID, req.Type, *req.SinceTime, req.Limit)
+}
diff --git a/server/pkg/external/wasabi/compliance.go b/server/pkg/external/wasabi/compliance.go
new file mode 100644
index 000000000..a6148f06c
--- /dev/null
+++ b/server/pkg/external/wasabi/compliance.go
@@ -0,0 +1,795 @@
+// S3 service style operations for Wasabi specific compliance functionality.
+//
+// This file contains various service operations for interacting with the Wasabi
+// compliance functions. These are based on standard operation templates taken
+// from the source code of the AWS S3 Go SDK (v1), and modified to use the
+// custom payloads expected by Wasabi.
+//
+// # Wasabi Compliance
+//
+// Wasabi supports a compliance policy that prevents the deletion of objects.
+//
+// Compliance is different from the object lock setting for a bucket, and is
+// mutually exclusive with it - a particular bucket can have only one of these
+// enabled at a time.
+//
+// There are compliance settings at the bucket level, which apply to all
+// objects added to that bucket. In addition, there are also compliance
+// settings at the object level.
+//
+// # Operations
+//
+// - GetBucketCompliance
+//
+// - PutBucketCompliance
+//
+// - GetObjectCompliance
+//
+// - PutObjectCompliance
+//
+// # References
+//
+// - Blog post: Custom S3 requests with AWS Go SDK
+// https://ente.io/blog/tech/custom-s3-requests/
+//
+// - AWS Go SDK examples:
+// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/common-examples.html
+//
+// - AWS Go SDK API template:
+// https://github.com/aws/aws-sdk-go/blob/main/service/s3/api.go
+//
+// - Wasabi Compliance:
+// https://wasabi.com/wp-content/themes/wasabi/docs/API_Guide/index.html#t=topics%2FCompliance.htm&rhsyns=%20
+//
+// - Wasabi Compliance - Operations on objects:
+// https://wasabi.com/wp-content/themes/wasabi/docs/API_Guide/index.html#t=topics%2FCompliance1.htm%23XREF_26008_Compliance&rhsyns=%20
+package wasabi
+
+import (
+	"github.com/aws/aws-sdk-go/aws/awsutil"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/service/s3"
+)
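All four operations below funnel through a small helper that builds a request.Operation with a custom `?compliance` HTTP path and sends it with the standard SDK machinery. A hedged usage sketch for the first of them; the endpoint, region, and bucket name are illustrative, not prescribed by museum:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/ente-io/museum/pkg/external/wasabi"
)

func main() {
	// Assumed endpoint/region; adjust to your Wasabi account.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint: aws.String("https://s3.wasabisys.com"),
		Region:   aws.String("us-east-1"),
	}))
	client := s3.New(sess)

	// Fetch the bucket-level compliance policy.
	out, err := wasabi.GetBucketCompliance(client,
		(&wasabi.GetBucketComplianceInput{}).SetBucket("my-bucket"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out) // Status, LockTime, RetentionDays, etc.
}
```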
+
+// newRequest creates a new request for an S3 operation and runs any
+// custom request initialization.
+func newRequest(c *s3.S3, op *request.Operation, params, data interface{}) *request.Request {
+	req := c.NewRequest(op, params, data)
+
+	// Run custom request initialization if present
+	// if initRequest != nil {
+	//	initRequest(req)
+	// }
+
+	return req
+}
+
+const opGetBucketCompliance = "GetBucketCompliance"
+
+// GetBucketCompliance API operation for Wasabi S3 API.
+//
+// Returns the compliance state of a bucket.
+//
+// See also: Wasabi compliance
+func GetBucketCompliance(c *s3.S3, input *GetBucketComplianceInput) (*GetBucketComplianceOutput, error) {
+	req, out := GetBucketComplianceRequest(c, input)
+	return out, req.Send()
+}
+
+// See also: GetBucketCompliance, Wasabi compliance
+func GetBucketComplianceRequest(c *s3.S3, input *GetBucketComplianceInput) (req *request.Request, output *GetBucketComplianceOutput) {
+	op := &request.Operation{
+		Name:       opGetBucketCompliance,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}?compliance",
+	}
+
+	if input == nil {
+		input = &GetBucketComplianceInput{}
+	}
+
+	output = &GetBucketComplianceOutput{}
+	req = newRequest(c, op, input, output)
+	return
+}
+
+type GetBucketComplianceInput struct {
+	_ struct{} `locationName:"GetBucketComplianceRequest" type:"structure"`
+
+	// The name of the bucket for which to get the compliance information.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetBucketComplianceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetBucketComplianceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketComplianceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetBucketComplianceInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetBucketComplianceInput) SetBucket(v string) *GetBucketComplianceInput {
+	s.Bucket = &v
+	return s
+}
+
+// Example response:
+//
+//	<BucketComplianceConfiguration>
+//	    <Status>enabled</Status>
+//	    <LockTime>2016-11-07T15:08:05Z</LockTime>
+//	    <IsLocked>false</IsLocked>
+//	    <RetentionDays>0</RetentionDays>
+//	    <ConditionalHold>false</ConditionalHold>
+//	    <DeleteAfterRetention>false</DeleteAfterRetention>
+//	</BucketComplianceConfiguration>
+type GetBucketComplianceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The compliance state of the bucket.
+	Status *string `type:"string" enum:"BucketComplianceStatus"`
+
+	// The time at which the compliance settings are "locked".
+	LockTime *string `type:"string"`
+
+	// Minimum number of days that objects are retained after their creation
+	// date or release from conditional hold.
+	RetentionDays *int64 `type:"integer"`
+
+	// Indicates if newly created objects are placed on conditional hold.
+	ConditionalHold *bool `type:"boolean"`
+
+	// Indicates if objects should be deleted automatically at the end of the
+	// retention period.
+	DeleteAfterRetention *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+func (s GetBucketComplianceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetBucketComplianceOutput) GoString() string {
+	return s.String()
+}
+
+// SetStatus sets the Status field's value.
+func (s *GetBucketComplianceOutput) SetStatus(v string) *GetBucketComplianceOutput {
+	s.Status = &v
+	return s
+}
+
+// SetLockTime sets the LockTime field's value.
+func (s *GetBucketComplianceOutput) SetLockTime(v string) *GetBucketComplianceOutput {
+	s.LockTime = &v
+	return s
+}
+
+// SetRetentionDays sets the RetentionDays field's value.
+func (s *GetBucketComplianceOutput) SetRetentionDays(v int64) *GetBucketComplianceOutput {
+	s.RetentionDays = &v
+	return s
+}
+
+// SetConditionalHold sets the ConditionalHold field's value.
+func (s *GetBucketComplianceOutput) SetConditionalHold(v bool) *GetBucketComplianceOutput {
+	s.ConditionalHold = &v
+	return s
+}
+
+// SetDeleteAfterRetention sets the DeleteAfterRetention field's value.
+func (s *GetBucketComplianceOutput) SetDeleteAfterRetention(v bool) *GetBucketComplianceOutput {
+	s.DeleteAfterRetention = &v
+	return s
+}
+
+const (
+	// BucketComplianceStatusEnabled is a BucketComplianceStatus enum value
+	BucketComplianceStatusEnabled = "enabled"
+
+	// BucketComplianceStatusDisabled is a BucketComplianceStatus enum value
+	BucketComplianceStatusDisabled = "disabled"
+)
+
+// BucketComplianceStatus_Values returns all elements of the BucketComplianceStatus enum
+func BucketComplianceStatus_Values() []string {
+	return []string{
+		BucketComplianceStatusEnabled,
+		BucketComplianceStatusDisabled,
+	}
+}
+
+const opPutBucketCompliance = "PutBucketCompliance"
+
+// PutBucketCompliance API operation for Wasabi.
+//
+// Sets the compliance state of an existing bucket.
+//
+// The compliance settings for a bucket are specified using the "?compliance"
+// query string along with the compliance settings as the XML body in the
+// request. For example:
+//
+//	PUT http://s3.wasabisys.com/my-bucket?compliance HTTP/1.1
+//
+//	<BucketComplianceConfiguration>
+//	    <Status>enabled</Status>
+//	    <LockTime>off</LockTime>
+//	    <RetentionDays>365</RetentionDays>
+//	    <ConditionalHold>true</ConditionalHold>
+//	</BucketComplianceConfiguration>
+//
+// After compliance is enabled for a bucket, the policy is immediately applied
+// to all objects in the bucket. An attempt to delete an object before its
+// retention period expires will return an error.
+//
+// See also: Wasabi compliance
+func PutBucketCompliance(c *s3.S3, input *PutBucketComplianceInput) (*PutBucketComplianceOutput, error) {
+	req, out := PutBucketComplianceRequest(c, input)
+	return out, req.Send()
+}
+
+// See also: PutBucketCompliance, Wasabi compliance
+func PutBucketComplianceRequest(c *s3.S3, input *PutBucketComplianceInput) (req *request.Request, output *PutBucketComplianceOutput) {
+	op := &request.Operation{
+		Name:       opPutBucketCompliance,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}?compliance",
+	}
+
+	if input == nil {
+		input = &PutBucketComplianceInput{}
+	}
+
+	output = &PutBucketComplianceOutput{}
+	req = newRequest(c, op, input, output)
+	// req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	// req.Handlers.Build.PushBackNamed(request.NamedHandler{
+	//	Name: "contentMd5Handler",
+	//	Fn:   checksum.AddBodyContentMD5Handler,
+	// })
+	return
+}
+
+type PutBucketComplianceInput struct {
+	_ struct{} `locationName:"PutBucketComplianceRequest" type:"structure" payload:"BucketComplianceConfiguration"`
+
+	// The bucket name.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// A container for the compliance configuration.
+	//
+	// BucketComplianceConfiguration is a required field
+	BucketComplianceConfiguration *BucketComplianceConfiguration `locationName:"BucketComplianceConfiguration" type:"structure" required:"true"`
+}
+
+// A container for the bucket compliance configuration.
+type BucketComplianceConfiguration struct {
+	_ struct{} `type:"structure"`
+
+	// The compliance state of the bucket.
+	//
+	// Either "enabled" or "disabled" to turn compliance on and off,
+	// respectively. Enabling will immediately apply to all objects in the
+	// bucket.
+	Status *string `type:"string" enum:"BucketComplianceStatus"`
+
+	// The time at which the compliance settings are "locked".
+	//
+	// Once locked, the settings cannot be reduced by any API call, and they
+	// cannot be unlocked without the intervention of Wasabi Customer Support.
+	// The lock time allows you to support two use cases:
+	//
+	// 1) testing that your software works properly before locking the
+	// compliance feature; or
+	//
+	// 2) never locking, which means that data can be deleted with an additional
+	// step of an administrator turning compliance off.
+	//
+	// The lock time parameter may be:
+	//
+	// - an ISO date (for example, 2016-11-07T15:08:05Z),
+	//
+	// - the string "now" to force immediate locking, or
+	//
+	// - the string "off" to not lock the compliance settings. This is the default.
+	LockTime *string `type:"string"`
+
+	// An integer for the minimum number of days that objects are always
+	// retained after their creation date or release from conditional hold. You
+	// can extend the retention date for any individual object, but may not
+	// shorten the date. This parameter is always required.
+	RetentionDays *int64 `type:"integer"`
+
+	// A Boolean value indicating if newly created objects are placed on
+	// conditional hold, meaning that they cannot be deleted until the
+	// conditional hold is explicitly turned off. The default is false if this
+	// parameter is not given. Note that this setting may be changed even after
+	// the settings are locked.
+	ConditionalHold *bool `type:"boolean"`
+
+	// A Boolean value indicating if the object should be deleted automatically
+	// at the end of the retention period. The default is to not delete objects
+	// after the retention period. Note that this setting may be changed even
+	// after the settings are locked.
+	DeleteAfterRetention *bool `type:"boolean"`
+}
+
+// String returns the string representation.
+func (s PutBucketComplianceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s PutBucketComplianceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketComplianceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutBucketComplianceInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.BucketComplianceConfiguration == nil {
+		invalidParams.Add(request.NewErrParamRequired("BucketComplianceConfiguration"))
+	}
+	if s.BucketComplianceConfiguration != nil {
+		if err := s.BucketComplianceConfiguration.Validate(); err != nil {
+			invalidParams.AddNested("BucketComplianceConfiguration", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutBucketComplianceInput) SetBucket(v string) *PutBucketComplianceInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetBucketComplianceConfiguration sets the BucketComplianceConfiguration field's value.
+func (s *PutBucketComplianceInput) SetBucketComplianceConfiguration(v BucketComplianceConfiguration) *PutBucketComplianceInput {
+	s.BucketComplianceConfiguration = &v
+	return s
+}
+
+// String returns the string representation.
+func (s BucketComplianceConfiguration) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s BucketComplianceConfiguration) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketComplianceConfiguration) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BucketComplianceConfiguration"}
+	if s.RetentionDays == nil {
+		invalidParams.Add(request.NewErrParamRequired("RetentionDays"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetStatus sets the Status field's value.
+func (s *BucketComplianceConfiguration) SetStatus(v string) *BucketComplianceConfiguration {
+	s.Status = &v
+	return s
+}
+
+// SetLockTime sets the LockTime field's value.
+func (s *BucketComplianceConfiguration) SetLockTime(v string) *BucketComplianceConfiguration {
+	s.LockTime = &v
+	return s
+}
+
+// SetRetentionDays sets the RetentionDays field's value.
+func (s *BucketComplianceConfiguration) SetRetentionDays(v int64) *BucketComplianceConfiguration {
+	s.RetentionDays = &v
+	return s
+}
+
+// SetConditionalHold sets the ConditionalHold field's value.
+func (s *BucketComplianceConfiguration) SetConditionalHold(v bool) *BucketComplianceConfiguration {
+	s.ConditionalHold = &v
+	return s
+}
+
+// SetDeleteAfterRetention sets the DeleteAfterRetention field's value.
+func (s *BucketComplianceConfiguration) SetDeleteAfterRetention(v bool) *BucketComplianceConfiguration {
+	s.DeleteAfterRetention = &v
+	return s
+}
+
+type PutBucketComplianceOutput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation.
+func (s PutBucketComplianceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s PutBucketComplianceOutput) GoString() string {
+	return s.String()
+}
+
+const opGetObjectCompliance = "GetObjectCompliance"
+
+// GetObjectCompliance API operation for Wasabi S3 API.
+//
+// Returns the compliance state of an object.
+//
+// The compliance settings for any specific object can also be retrieved using
+// the "?compliance" query string.
+// In addition to the object compliance settings above, the query returns the
+// calculated SHA256 hash for the object, which can be used to determine that
+// the object has not been modified. Note that the SHA256 value is only
+// available for objects that are uploaded as a single object and is not
+// available for multi-part or composed objects.
+//
+// The following is an example of getting the compliance on an object:
+//
+//	GET http://s3.wasabisys.com/my-bucket/my-object?compliance HTTP/1.1
+//
+//	<ObjectComplianceConfiguration>
+//	    <RetentionTime>2016-10-31T15:08:05Z</RetentionTime>
+//	    <ConditionalHold>false</ConditionalHold>
+//	    <LegalHold>false</LegalHold>
+//	    <SHA256>14b4be3894e92166b508007b6c2e4fb6e88d3d0ad652c76475089a50ebe6e33b</SHA256>
+//	</ObjectComplianceConfiguration>
+//
+// See also: Wasabi compliance
+func GetObjectCompliance(c *s3.S3, input *GetObjectComplianceInput) (*GetObjectComplianceOutput, error) {
+	req, out := GetObjectComplianceRequest(c, input)
+	return out, req.Send()
+}
+
+// See also: GetObjectCompliance, Wasabi compliance
+func GetObjectComplianceRequest(c *s3.S3, input *GetObjectComplianceInput) (req *request.Request, output *GetObjectComplianceOutput) {
+	op := &request.Operation{
+		Name:       opGetObjectCompliance,
+		HTTPMethod: "GET",
+		HTTPPath:   "/{Bucket}/{Key+}?compliance",
+	}
+
+	if input == nil {
+		input = &GetObjectComplianceInput{}
+	}
+
+	output = &GetObjectComplianceOutput{}
+	req = newRequest(c, op, input, output)
+	return
+}
+
+type GetObjectComplianceInput struct {
+	_ struct{} `locationName:"GetObjectComplianceRequest" type:"structure"`
+
+	// The bucket name of the bucket containing the object.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Key name of the object to get the compliance information of.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation.
+func (s GetObjectComplianceInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetObjectComplianceInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectComplianceInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetObjectComplianceInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Bucket != nil && len(*s.Bucket) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Bucket", 1))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *GetObjectComplianceInput) SetBucket(v string) *GetObjectComplianceInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *GetObjectComplianceInput) SetKey(v string) *GetObjectComplianceInput {
+	s.Key = &v
+	return s
+}
+
+// See also: PutObjectComplianceInput
+type GetObjectComplianceOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The time before which the object cannot be deleted.
+	RetentionTime *string `type:"string"`
+
+	// Indicates if the object is under conditional hold.
+	ConditionalHold *bool `type:"boolean"`
+
+	// Indicates if the object is under legal hold.
+	LegalHold *bool `type:"boolean"`
+
+	// The calculated SHA256 hash for the object.
+	SHA256 *string `type:"string"`
+}
+
+// String returns the string representation.
+func (s GetObjectComplianceOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation.
+func (s GetObjectComplianceOutput) GoString() string {
+	return s.String()
+}
+
+// SetRetentionTime sets the RetentionTime field's value.
+func (s *GetObjectComplianceOutput) SetRetentionTime(v string) *GetObjectComplianceOutput {
+	s.RetentionTime = &v
+	return s
+}
+
+// SetConditionalHold sets the ConditionalHold field's value.
+func (s *GetObjectComplianceOutput) SetConditionalHold(v bool) *GetObjectComplianceOutput {
+	s.ConditionalHold = &v
+	return s
+}
+
+// SetLegalHold sets the LegalHold field's value.
+func (s *GetObjectComplianceOutput) SetLegalHold(v bool) *GetObjectComplianceOutput {
+	s.LegalHold = &v
+	return s
+}
+
+// SetSHA256 sets the SHA256 field's value.
+func (s *GetObjectComplianceOutput) SetSHA256(v string) *GetObjectComplianceOutput {
+	s.SHA256 = &v
+	return s
+}
+
+const opPutObjectCompliance = "PutObjectCompliance"
+
+// PutObjectCompliance API operation for Wasabi.
+//
+// Sets the compliance state of an existing object.
+//
+// The compliance settings for any one object in a bucket with compliance can
+// also be changed within the limits of the compliance on the bucket.
+//
+// See also: PutObjectComplianceInput, Wasabi compliance
+func PutObjectCompliance(c *s3.S3, input *PutObjectComplianceInput) (*PutObjectComplianceOutput, error) {
+	req, out := PutObjectComplianceRequest(c, input)
+	return out, req.Send()
+}
+
+// See also: PutObjectCompliance, Wasabi compliance
+func PutObjectComplianceRequest(c *s3.S3, input *PutObjectComplianceInput) (req *request.Request, output *PutObjectComplianceOutput) {
+	op := &request.Operation{
+		Name:       opPutObjectCompliance,
+		HTTPMethod: "PUT",
+		HTTPPath:   "/{Bucket}/{Key+}?compliance",
+	}
+
+	if input == nil {
+		input = &PutObjectComplianceInput{}
+	}
+
+	output = &PutObjectComplianceOutput{}
+	req = newRequest(c, op, input, output)
+	// req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
+	// req.Handlers.Build.PushBackNamed(request.NamedHandler{
+	//	Name: "contentMd5Handler",
+	//	Fn:   checksum.AddBodyContentMD5Handler,
+	// })
+	return
+}
+
+// The following is an example of setting the compliance on an object:
+//
+//	PUT http://s3.wasabisys.com/my-bucket/my-object?compliance HTTP/1.1
+//
+//	<ObjectComplianceConfiguration>
+//	    <ConditionalHold>false</ConditionalHold>
+//	    <RetentionTime>2018-03-13T10:45:00Z</RetentionTime>
+//	</ObjectComplianceConfiguration>
+type PutObjectComplianceInput struct {
+	_ struct{} `locationName:"PutObjectComplianceRequest" type:"structure" payload:"ObjectComplianceConfiguration"`
+
+	// The bucket name of the bucket containing the object.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Key name of the object to put the compliance information to.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A container for object compliance configuration.
+	//
+	// ObjectComplianceConfiguration is a required field
+	ObjectComplianceConfiguration *ObjectComplianceConfiguration `locationName:"ObjectComplianceConfiguration" type:"structure" required:"true"`
+}
+
+// A container for object compliance configuration.
+type ObjectComplianceConfiguration struct { + _ struct{} `type:"structure"` + + // An ISO time giving a new retention time for the object in which the + // object cannot be deleted before this time. Note that the new retention + // time must be past the reten­tion period given by the bucket policy or an + // error is returned. + RetentionTime *string `type:"string"` + + // A Boolean value "false" to release the object from the conditional hold + // setting in the bucket policy. The retention period in days is started + // from the point when the con­ditional hold is released. Once the + // conditional hold is set false, it may not be returned to conditional + // hold. + ConditionalHold *bool `type:"boolean"` + + // A Boolean value "true" or "false" to set the legal hold status. When an + // object has a legal hold status of true, the object cannot be deleted + // regardless of the retention period. + LegalHold *bool `type:"boolean"` +} + +// String returns the string representation. +func (s PutObjectComplianceInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +func (s PutObjectComplianceInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectComplianceInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectComplianceInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Bucket != nil && len(*s.Bucket) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Bucket", 1)) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.ObjectComplianceConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ObjectComplianceConfiguration")) + } + if s.ObjectComplianceConfiguration != nil { + if err := s.ObjectComplianceConfiguration.Validate(); err != nil { + invalidParams.AddNested("ObjectComplianceConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectComplianceInput) SetBucket(v string) *PutObjectComplianceInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectComplianceInput) SetKey(v string) *PutObjectComplianceInput { + s.Key = &v + return s +} + +// String returns the string representation. +func (s ObjectComplianceConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +func (s ObjectComplianceConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectComplianceConfiguration) Validate() error { + return nil +} + +// SetRetentionTime sets the RetentionTime field's value. +func (s *ObjectComplianceConfiguration) SetRetentionTime(v string) *ObjectComplianceConfiguration { + s.RetentionTime = &v + return s +} + +// SetConditionalHold sets the ConditionalHold field's value. +func (s *ObjectComplianceConfiguration) SetConditionalHold(v bool) *ObjectComplianceConfiguration { + s.ConditionalHold = &v + return s +} + +// SetLegalHold sets the LegalHold field's value. 
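Putting the input and configuration types together, releasing a conditional hold on one object is a small request. A hedged sketch; the bucket, key, and session setup are illustrative, and note that PutObjectComplianceInput has no setter for the configuration, so the field is assigned directly:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"

	"github.com/ente-io/museum/pkg/external/wasabi"
)

func main() {
	// Assumed endpoint/region; adjust to your Wasabi account.
	sess := session.Must(session.NewSession(&aws.Config{
		Endpoint: aws.String("https://s3.wasabisys.com"),
		Region:   aws.String("us-east-1"),
	}))
	client := s3.New(sess)

	// Release the conditional hold so the retention clock starts ticking.
	cfg := (&wasabi.ObjectComplianceConfiguration{}).SetConditionalHold(false)
	input := (&wasabi.PutObjectComplianceInput{}).
		SetBucket("my-bucket").
		SetKey("my-object")
	input.ObjectComplianceConfiguration = cfg

	if _, err := wasabi.PutObjectCompliance(client, input); err != nil {
		log.Fatal(err)
	}
}
```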
+func (s *ObjectComplianceConfiguration) SetLegalHold(v bool) *ObjectComplianceConfiguration { + s.LegalHold = &v + return s +} + +type PutObjectComplianceOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation. +func (s PutObjectComplianceOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation. +func (s PutObjectComplianceOutput) GoString() string { + return s.String() +} diff --git a/server/pkg/external/zoho/api.go b/server/pkg/external/zoho/api.go new file mode 100644 index 000000000..3fb5a374e --- /dev/null +++ b/server/pkg/external/zoho/api.go @@ -0,0 +1,155 @@ +// The zoho package contains wrappers for the (generic) Zoho API. +// +// These are stateless functions that wrap over the HTTP calls that need to be +// made to obtain data from the Zoho HTTP API. In particular, they contain the +// code for dealing with access tokens and their renewal. +package zoho + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" +) + +// The minimum credentials we need to obtain valid access tokens. +// +// To generate these credentials: +// +// Create new client in https://api-console.zoho.com/. Use client type +// "Self-client". This gives us the client id and client secret. +// +// Generate an (authorization) code with scope "ZohoCampaigns.contact.WRITE" and +// portal "Campaigns". +// +// Use this authorization code to obtain a refresh/access token pair. Note that +// we don't have a redirect_uri, so we just use the dummy one that is given in +// their documentation examples elsewhere. +// (https://www.zoho.com/accounts/protocol/oauth/web-apps/access-token.html) +// +// curl -X POST \ +// 'https://accounts.zoho.com/oauth/v2/token? \ +// client_id=xxx&grant_type=authorization_code&client_secret=yyy \ +// &redirect_uri=https://www.zylker.com/oauthredirect&code=zzz' +// +// Save the refresh token. We can later use it to regenerate the access token +// (Zoho access tokens have a short, 1 hour validity anyway). +type Credentials struct { + ClientID string + ClientSecret string + RefreshToken string +} + +// Do an HTTP `method` request to `url` using the given accessToken. +// +// If the accessToken has expired, use the given credentials to renew it. +// +// Return the accessToken (renewed or original) that gets used, and any errors +// that occurred. If the API returns `status` "success", then error will be nil. 
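A caller of this package holds the long-lived refresh token and treats the access token as disposable: DoRequest may transparently replace it, so whatever token comes back should be persisted for the next call. A sketch under stated assumptions; the Campaigns endpoint URL is illustrative, not a documented museum call, and starting from an empty access token relies on Zoho answering with the 1007 error that triggers renewal:

```go
package main

import (
	"fmt"
	"log"

	"github.com/ente-io/museum/pkg/external/zoho"
)

func main() {
	creds := zoho.Credentials{
		ClientID:     "client-id",     // from api-console.zoho.com
		ClientSecret: "client-secret", // ditto
		RefreshToken: "refresh-token", // obtained once via the auth code flow
	}

	// Hypothetical endpoint; DoRequest renews the token on a 1007
	// ("Unauthorized request") response and returns the token that
	// finally worked, which we keep for subsequent calls.
	url := "https://campaigns.zoho.com/api/v1.1/json/listsubscribe?listkey=KEY" // illustrative
	accessToken, err := zoho.DoRequest("POST", url, "", creds)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("access token to reuse:", accessToken)
}
```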
+func DoRequest(method string, url string, accessToken string, credentials Credentials) (string, error) { + ar, err := doRequestNoRetry(method, url, accessToken, credentials) + if err != nil { + return accessToken, stacktrace.Propagate(err, "") + } + + // Code 1007 indicates that the access token has expired + // ("message":"Unauthorized request.") + if ar.Status == "error" && ar.Code == "1007" { + accessToken, err = renewAccessToken(credentials) + if err != nil { + return accessToken, stacktrace.Propagate(err, "") + } + + // Try again + ar, err = doRequestNoRetry(method, url, accessToken, credentials) + if err != nil { + return accessToken, stacktrace.Propagate(err, "") + } + } + + if ar.Status == "success" { + return accessToken, nil + } + + // Something else went wrong + return accessToken, stacktrace.NewError( + "Zoho API returned an non-success status %s (code %s: %s)", + ar.Status, ar.Code, ar.Message) +} + +// The basic generic fields that we expect in a response from Zoho APIs +type genericAPIResponse struct { + Status string `json:"status"` + Code string `json:"Code"` + Message string `json:"message"` +} + +func doRequestNoRetry(method string, url string, accessToken string, credentials Credentials) (genericAPIResponse, error) { + var ar genericAPIResponse + + client := &http.Client{} + req, err := http.NewRequest(method, url, nil) + if err != nil { + return ar, stacktrace.Propagate(err, "") + } + + req.Header.Set("Accept", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", accessToken)) + res, err := client.Do(req) + if err != nil { + return ar, stacktrace.Propagate(err, "") + } + + if res.Body != nil { + defer res.Body.Close() + } + + body, err := io.ReadAll(res.Body) + if err != nil { + return ar, stacktrace.Propagate(err, "") + } + + log.Infof("Zoho %s %s response: %s", method, url, body) + + err = json.Unmarshal(body, &ar) + return ar, stacktrace.Propagate(err, "") +} + +// Obtain a new access token using the given credentials +func renewAccessToken(credentials Credentials) (string, error) { + // https://www.zoho.com/crm/developer/docs/api/v3/refresh.html + url := fmt.Sprintf( + "https://accounts.zoho.com/oauth/v2/token?refresh_token=%s&client_id=%s&client_secret=%s&grant_type=refresh_token", + credentials.RefreshToken, credentials.ClientID, credentials.ClientSecret) + + type jsonResponse struct { + AccessToken string `json:"access_token"` + } + + res, err := http.Post(url, "application/json", nil) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + + if res.Body != nil { + defer res.Body.Close() + } + + body, err := io.ReadAll(res.Body) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + + var jr jsonResponse + err = json.Unmarshal(body, &jr) + if err != nil { + return "", stacktrace.Propagate(err, "") + } + + log.Infof("Renewed Zoho access token") + + return jr.AccessToken, nil +} diff --git a/server/pkg/middleware/access_token.go b/server/pkg/middleware/access_token.go new file mode 100644 index 000000000..638a0895f --- /dev/null +++ b/server/pkg/middleware/access_token.go @@ -0,0 +1,187 @@ +package middleware + +import ( + "bytes" + "context" + "crypto/sha256" + "fmt" + "net/http" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/controller" + "github.com/ente-io/museum/pkg/controller/discord" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/array" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/museum/pkg/utils/network" + 
"github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/gin-gonic/gin" + "github.com/patrickmn/go-cache" + "github.com/sirupsen/logrus" +) + +var passwordWhiteListedURLs = []string{"/public-collection/info", "/public-collection/report-abuse", "/public-collection/verify-password"} +var whitelistedCollectionShareIDs = []int64{111} + +// AccessTokenMiddleware intercepts and authenticates incoming requests +type AccessTokenMiddleware struct { + PublicCollectionRepo *repo.PublicCollectionRepository + PublicCollectionCtrl *controller.PublicCollectionController + CollectionRepo *repo.CollectionRepository + Cache *cache.Cache + BillingCtrl *controller.BillingController + DiscordController *discord.DiscordController +} + +// AccessTokenAuthMiddleware returns a middle ware that extracts the `X-Auth-Access-Token` +// within the header of a request and uses it to validate the access token and set the +// ente.PublicAccessContext with auth.PublicAccessKey as key +func (m *AccessTokenMiddleware) AccessTokenAuthMiddleware(urlSanitizer func(_ *gin.Context) string) gin.HandlerFunc { + return func(c *gin.Context) { + accessToken := auth.GetAccessToken(c) + if accessToken == "" { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "missing accessToken"}) + return + } + clientIP := network.GetClientIP(c) + userAgent := c.GetHeader("User-Agent") + var publicCollectionSummary ente.PublicCollectionSummary + var err error + + cacheKey := computeHashKeyForList([]string{accessToken, clientIP, userAgent}, ":") + cachedValue, cacheHit := m.Cache.Get(cacheKey) + if !cacheHit { + publicCollectionSummary, err = m.PublicCollectionRepo.GetCollectionSummaryByToken(c, accessToken) + if err != nil { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid token"}) + return + } + if publicCollectionSummary.IsDisabled { + c.AbortWithStatusJSON(http.StatusGone, gin.H{"error": "disabled token"}) + return + } + // validate if user still has active paid subscription + if err = m.validateOwnersSubscription(publicCollectionSummary.CollectionID); err != nil { + logrus.WithError(err).Warn("failed to verify active paid subscription") + c.AbortWithStatusJSON(http.StatusGone, gin.H{"error": "no active subscription"}) + return + } + + // validate device limit + reached, err := m.isDeviceLimitReached(c, publicCollectionSummary, clientIP, userAgent) + if err != nil { + logrus.WithError(err).Error("failed to check device limit") + c.AbortWithStatusJSON(http.StatusInternalServerError, gin.H{"error": "something went wrong"}) + return + } + if reached { + c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "reached device limit"}) + return + } + } else { + publicCollectionSummary = cachedValue.(ente.PublicCollectionSummary) + } + + if publicCollectionSummary.ValidTill > 0 && // expiry time is defined, 0 indicates no expiry + publicCollectionSummary.ValidTill < time.Microseconds() { + c.AbortWithStatusJSON(http.StatusGone, gin.H{"error": "expired token"}) + return + } + + // checks password protected public collection + if publicCollectionSummary.PassHash != nil && *publicCollectionSummary.PassHash != "" { + reqPath := urlSanitizer(c) + if err = m.validatePassword(c, reqPath, publicCollectionSummary); err != nil { + logrus.WithError(err).Warn("password validation failed") + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": err}) + return + } + } + + if !cacheHit { + m.Cache.Set(cacheKey, publicCollectionSummary, cache.DefaultExpiration) + } + + 
c.Set(auth.PublicAccessKey, ente.PublicAccessContext{ + ID: publicCollectionSummary.ID, + IP: clientIP, + UserAgent: userAgent, + CollectionID: publicCollectionSummary.CollectionID, + }) + c.Next() + } +} +func (m *AccessTokenMiddleware) validateOwnersSubscription(cID int64) error { + userID, err := m.CollectionRepo.GetOwnerID(cID) + if err != nil { + return stacktrace.Propagate(err, "") + } + return m.BillingCtrl.HasActiveSelfOrFamilySubscription(userID) +} + +func (m *AccessTokenMiddleware) isDeviceLimitReached(ctx context.Context, + collectionSummary ente.PublicCollectionSummary, ip string, ua string) (bool, error) { + // skip deviceLimit check & record keeping for requests via CF worker + if network.IsCFWorkerIP(ip) { + return false, nil + } + if collectionSummary.DeviceLimit <= 0 { // no device limit was added + return false, nil + } + sharedID := collectionSummary.ID + hasAccessedInPast, err := m.PublicCollectionRepo.AccessedInPast(ctx, sharedID, ip, ua) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + // if the device has accessed the url in the past, let it access it now as well, irrespective of device limit. + if hasAccessedInPast { + return false, nil + } + count, err := m.PublicCollectionRepo.GetUniqueAccessCount(ctx, sharedID) + if err != nil { + return false, stacktrace.Propagate(err, "failed to get unique access count") + } + + deviceLimit := int64(collectionSummary.DeviceLimit) + if deviceLimit == controller.DeviceLimitThreshold { + deviceLimit = controller.DeviceLimitThresholdMultiplier * controller.DeviceLimitThreshold + } + + if count >= controller.DeviceLimitWarningThreshold { + if !array.Int64InList(sharedID, whitelistedCollectionShareIDs) { + m.DiscordController.NotifyPotentialAbuse( + fmt.Sprintf("Album exceeds warning threshold: {CollectionID: %d, ShareID: %d}", + collectionSummary.CollectionID, collectionSummary.ID)) + } + } + + if count >= deviceLimit { + return true, nil + } + err = m.PublicCollectionRepo.RecordAccessHistory(ctx, sharedID, ip, ua) + return false, stacktrace.Propagate(err, "failed to record access history") +} + +// validatePassword will verify if the user is provided correct password for the public album +func (m *AccessTokenMiddleware) validatePassword(c *gin.Context, reqPath string, + collectionSummary ente.PublicCollectionSummary) error { + if array.StringInList(reqPath, passwordWhiteListedURLs) { + return nil + } + accessTokenJWT := auth.GetAccessTokenJWT(c) + if accessTokenJWT == "" { + return ente.ErrAuthenticationRequired + } + return m.PublicCollectionCtrl.ValidateJWTToken(c, accessTokenJWT, *collectionSummary.PassHash) +} + +func computeHashKeyForList(list []string, delim string) string { + var buffer bytes.Buffer + for i := range list { + buffer.WriteString(list[i]) + buffer.WriteString(delim) + } + sha := sha256.Sum256(buffer.Bytes()) + return fmt.Sprintf("%x\n", sha) +} diff --git a/server/pkg/middleware/auth.go b/server/pkg/middleware/auth.go new file mode 100644 index 000000000..9c4bb2280 --- /dev/null +++ b/server/pkg/middleware/auth.go @@ -0,0 +1,88 @@ +package middleware + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/ente-io/museum/ente/jwt" + "github.com/ente-io/museum/pkg/utils/network" + + "github.com/ente-io/museum/pkg/controller/user" + "github.com/ente-io/museum/pkg/repo" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/gin-gonic/gin" + "github.com/patrickmn/go-cache" + "github.com/spf13/viper" +) + +// AuthMiddleware intercepts and authenticates incoming requests +type 
AuthMiddleware struct { + UserAuthRepo *repo.UserAuthRepository + Cache *cache.Cache + UserController *user.UserController +} + +// TokenAuthMiddleware returns a middle ware that extracts the `X-AuthToken` +// within the header of a request and uses it to authenticate and insert the +// authenticated user to the request's `X-Auth-User-ID` field. +// If isJWT is true we use JWT token validation +func (m *AuthMiddleware) TokenAuthMiddleware(jwtClaimScope *jwt.ClaimScope) gin.HandlerFunc { + return func(c *gin.Context) { + token := auth.GetToken(c) + if token == "" { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "missing token"}) + return + } + app := auth.GetApp(c) + cacheKey := fmt.Sprintf("%s:%s", app, token) + isJWT := false + if jwtClaimScope != nil { + isJWT = true + cacheKey = fmt.Sprintf("%s:%s:%s", app, token, *jwtClaimScope) + } + userID, found := m.Cache.Get(cacheKey) + var err error + if !found { + if isJWT { + userID, err = m.UserController.ValidateJWTToken(token, *jwtClaimScope) + } else { + userID, err = m.UserAuthRepo.GetUserIDWithToken(token, app) + } + if err != nil { + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid token"}) + return + } + if !isJWT { + ip := network.GetClientIP(c) + userAgent := c.Request.UserAgent() + // skip updating last used for requests routed via CF worker + if !network.IsCFWorkerIP(ip) { + go func() { + _ = m.UserAuthRepo.UpdateLastUsedAt(userID.(int64), token, ip, userAgent) + }() + } + } + m.Cache.Set(cacheKey, userID, cache.DefaultExpiration) + } + c.Request.Header.Set("X-Auth-User-ID", strconv.FormatInt(userID.(int64), 10)) + c.Next() + } +} + +// AdminAuthMiddleware returns a middle ware that extracts the `userID` added by the TokenAuthMiddleware +// within the header of a request and uses it to check admin status +// NOTE: Should be added after TokenAuthMiddleware middleware +func (m *AuthMiddleware) AdminAuthMiddleware() gin.HandlerFunc { + return func(c *gin.Context) { + userID := auth.GetUserID(c.Request.Header) + admins := viper.GetIntSlice("internal.admins") + for _, admin := range admins { + if int64(admin) == userID { + c.Next() + return + } + } + c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "insufficient permissions"}) + } +} diff --git a/server/pkg/middleware/cast_auth.go b/server/pkg/middleware/cast_auth.go new file mode 100644 index 000000000..851ed3957 --- /dev/null +++ b/server/pkg/middleware/cast_auth.go @@ -0,0 +1,53 @@ +package middleware + +import ( + "fmt" + castCtrl "github.com/ente-io/museum/pkg/controller/cast" + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/gin-gonic/gin" + "github.com/patrickmn/go-cache" + "net/http" +) + +// CastMiddleware intercepts and authenticates incoming requests +type CastMiddleware struct { + Cache *cache.Cache + CastCtrl *castCtrl.Controller +} + +// CastAuthMiddleware returns a middle ware that extracts the `X-AuthToken` +// within the header of a request and uses it to authenticate and insert the +// authenticated user to the request's `X-Auth-User-ID` field. 
+// It validates the cast access token via the cast controller and sets the
+// resulting cast context on the request.
+func (m *CastMiddleware) CastAuthMiddleware() gin.HandlerFunc {
+	return func(c *gin.Context) {
+		token := auth.GetCastToken(c)
+		if token == "" {
+			c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "cast access token missing"})
+			return
+		}
+		app := auth.GetApp(c)
+		cacheKey := fmt.Sprintf("%s:%s:%s", app, token, "cast")
+		cachedCastCtx, found := m.Cache.Get(cacheKey)
+		if !found {
+			castCtx, err := m.CastCtrl.GetCollectionAndCasterIDForToken(c, token)
+			if err != nil {
+				c.AbortWithStatusJSON(http.StatusUnauthorized, gin.H{"error": "invalid token"})
+				return
+			}
+			m.Cache.Set(cacheKey, *castCtx, cache.DefaultExpiration)
+			c.Set(auth.CastContext, *castCtx)
+		} else {
+			c.Set(auth.CastContext, cachedCastCtx)
+			// asynchronously validate that the token is still active; use a
+			// copy of the gin context since the request may finish first
+			go func() {
+				_, err := m.CastCtrl.GetCollectionAndCasterIDForToken(c.Copy(), token)
+				if err != nil {
+					m.Cache.Delete(cacheKey)
+				}
+			}()
+		}
+		c.Next()
+	}
+}
diff --git a/server/pkg/middleware/rate_limit.go b/server/pkg/middleware/rate_limit.go
new file mode 100644
index 000000000..391c01887
--- /dev/null
+++ b/server/pkg/middleware/rate_limit.go
@@ -0,0 +1,121 @@
+package middleware
+
+import (
+	"fmt"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/ente-io/museum/pkg/controller/discord"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/museum/pkg/utils/network"
+
+	"github.com/gin-gonic/gin"
+	log "github.com/sirupsen/logrus"
+	"github.com/ulule/limiter/v3"
+	"github.com/ulule/limiter/v3/drivers/store/memory"
+)
+
+type RateLimitMiddleware struct {
+	limit10ReqPerMin  *limiter.Limiter
+	limit200ReqPerSec *limiter.Limiter
+	discordCtrl       *discord.DiscordController
+}
+
+func NewRateLimitMiddleware(discordCtrl *discord.DiscordController) *RateLimitMiddleware {
+	return &RateLimitMiddleware{
+		limit10ReqPerMin:  rateLimiter("10-M"),
+		limit200ReqPerSec: rateLimiter("200-S"),
+		discordCtrl:       discordCtrl,
+	}
+}
+
+// rateLimiter returns an instance of limiter.Limiter for the given interval.
+// Examples: 5 reqs/sec: "5-S", 10 reqs/min: "10-M"
+// 1000 reqs/hour: "1000-H", 2000 reqs/day: "2000-D"
+// https://github.com/ulule/limiter/
+func rateLimiter(interval string) *limiter.Limiter {
+	store := memory.NewStore()
+	rate, err := limiter.NewRateFromFormatted(interval)
+	if err != nil {
+		panic(err)
+	}
+	instance := limiter.New(store, rate)
+	return instance
+}
+
+// APIRateLimitMiddleware only rate limits sensitive public endpoints which have a higher risk
+// of abuse by any bad actor.
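Attaching the limiter to a router is a one-liner per group. A hedged sketch of the wiring; the sanitizer below is a trivial stand-in for museum's own URL sanitizer, and construction of the discord controller is elided:

```go
package routes

import (
	"github.com/gin-gonic/gin"

	"github.com/ente-io/museum/pkg/controller/discord"
	"github.com/ente-io/museum/pkg/middleware"
)

// newRouter shows where the rate limiter slots into the middleware chain.
func newRouter(discordCtrl *discord.DiscordController) *gin.Engine {
	// Stand-in sanitizer: museum normalizes paths (e.g. path params) here.
	urlSanitizer := func(c *gin.Context) string { return c.Request.URL.Path }

	r := gin.New()
	rl := middleware.NewRateLimitMiddleware(discordCtrl)
	// Applied globally; getLimiter decides per path which limit, if any, applies.
	r.Use(rl.APIRateLimitMiddleware(urlSanitizer))
	return r
}
```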
+func (r *RateLimitMiddleware) APIRateLimitMiddleware(urlSanitizer func(_ *gin.Context) string) gin.HandlerFunc { + return func(c *gin.Context) { + requestPath := urlSanitizer(c) + rateLimiter := r.getLimiter(requestPath, c.Request.Method) + if rateLimiter != nil { + key := fmt.Sprintf("%s-%s", network.GetClientIP(c), requestPath) + limitContext, err := rateLimiter.Get(c, key) + if err != nil { + log.Error("Failed to check rate limit", err) + c.Next() // assume that limit hasn't reached + return + } + if limitContext.Reached { + go r.discordCtrl.NotifyPotentialAbuse(fmt.Sprintf("Rate limit breached %s", requestPath)) + log.Error(fmt.Sprintf("Rate limit breached %s", key)) + c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "Rate limit breached, try later"}) + return + } + } + c.Next() + } +} + +// APIRateLimitForUserMiddleware only rate limits sensitive authenticated endpoints which have a higher risk +// of abuse by any bad actor. +func (r *RateLimitMiddleware) APIRateLimitForUserMiddleware(urlSanitizer func(_ *gin.Context) string) gin.HandlerFunc { + return func(c *gin.Context) { + requestPath := urlSanitizer(c) + rateLimiter := r.getLimiter(requestPath, c.Request.Method) + if rateLimiter != nil { + userID := auth.GetUserID(c.Request.Header) + if userID == 0 { + // do not apply limit, just log + log.Error("userID must be present in request header for applying rate-limit") + return + } + limitContext, err := rateLimiter.Get(c, strconv.FormatInt(userID, 10)) + if err != nil { + log.Error("Failed to check rate limit", err) + c.Next() // assume that limit hasn't reached + return + } + if limitContext.Reached { + msg := fmt.Sprintf("Rate limit breached %d for path: %s", userID, requestPath) + go r.discordCtrl.NotifyPotentialAbuse(msg) + log.Error(msg) + c.AbortWithStatusJSON(http.StatusTooManyRequests, gin.H{"error": "Rate limit breached, try later"}) + return + } + } + c.Next() + } +} + +// getLimiter, based on reqPath & reqMethod, return instance of limiter.Limiter which needs to +// be applied for a request. It returns nil if the request is not rate limited +func (r *RateLimitMiddleware) getLimiter(reqPath string, reqMethod string) *limiter.Limiter { + if reqPath == "/users/ott" || + reqPath == "/users/verify-email" || + reqPath == "/public-collection/verify-password" || + reqPath == "/family/accept-invite" || + reqPath == "/users/srp/attributes" || + reqPath == "/users/srp/verify-session" || + reqPath == "/family/invite-info/:token" || + reqPath == "/family/add-member" || + strings.HasPrefix(reqPath, "/users/srp/") || + strings.HasPrefix(reqPath, "/users/two-factor/") { + return r.limit10ReqPerMin + } else if reqPath == "/files/preview" { + return r.limit200ReqPerSec + } + return nil +} diff --git a/server/pkg/middleware/recover.go b/server/pkg/middleware/recover.go new file mode 100644 index 000000000..bce492742 --- /dev/null +++ b/server/pkg/middleware/recover.go @@ -0,0 +1,88 @@ +package middleware + +import ( + "errors" + "fmt" + "net" + "net/http" + "net/http/httputil" + "os" + "runtime/debug" + "strings" + "syscall" + + "github.com/gin-contrib/requestid" + "github.com/gin-gonic/gin" + log "github.com/sirupsen/logrus" +) + +// PanicRecover is similar to Gin's CustomRecoveryWithWriter but with custom logger. 
+// There's no easy way to plugin application logger instance & log custom attributes (like requestID) +func PanicRecover() gin.HandlerFunc { + return func(c *gin.Context) { + defer func() { + if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + // + // Newer versions of gin might fix this (the PR is not yet + // merged as on writing): + // https://github.com/gin-gonic/gin/pull/2150 + var brokenPipe bool + + // Legacy check, not sure if it ever worked. Retaining this, can + // remove both when the gin PR is merged. + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + brokenPipe = true + } + } + } + + // Newer check. Also untested. + if !brokenPipe { + if re, ok := err.(error); ok { + if errors.Is(re, syscall.EPIPE) { + brokenPipe = true + } + } + } + + httpRequest, _ := httputil.DumpRequest(c.Request, false) + requestData := strings.Split(string(httpRequest), "\r\n") + for idx, header := range requestData { + current := strings.Split(header, ":") + if current[0] == "Authorization" || current[0] == "X-Auth-Token" { + requestData[idx] = current[0] + ": *" + } + } + reqDataWithoutAuthHeaders := strings.Join(requestData, "\r\n") + var logWithAttributes = log.WithFields(log.Fields{ + "request_data": reqDataWithoutAuthHeaders, + "req_id": requestid.Get(c), + "req_uri": c.Request.URL.Path, + "panic": err, + "broken_pipe": brokenPipe, + "stack": string(debug.Stack()), + }) + if brokenPipe { + log.Warn("Panic Recovery: Broken pipe") + // If the connection is dead, we can't write a status to it. + c.Error(err.(error)) // nolint: errcheck + c.Abort() + return + } + if fmt.Sprintf("%v", err) == "client disconnected" { + // https://github.com/gin-gonic/gin/issues/2279#issuecomment-768349478 + logWithAttributes.Warn("Client request cancelled") + c.Request.Context().Done() + } else { + logWithAttributes.Error("Recovery from Panic") + c.AbortWithStatus(http.StatusInternalServerError) + } + } + }() + c.Next() + } +} diff --git a/server/pkg/middleware/request_logger.go b/server/pkg/middleware/request_logger.go new file mode 100644 index 000000000..f80985036 --- /dev/null +++ b/server/pkg/middleware/request_logger.go @@ -0,0 +1,108 @@ +package middleware + +import ( + "bytes" + "io" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/ente-io/museum/pkg/utils/network" + + "github.com/ente-io/museum/pkg/utils/auth" + "github.com/ente-io/stacktrace" + "github.com/gin-contrib/requestid" + + timeUtil "github.com/ente-io/museum/pkg/utils/time" + + "github.com/ente-io/museum/pkg/utils/handler" + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/sirupsen/logrus" +) + +var latency = promauto.NewHistogramVec(prometheus.HistogramOpts{ + Name: "museum_latency", + Help: "The amount of time the server is taking to respond to requests", + Buckets: []float64{10, 50, 100, 200, 500, 1000, 10000, 30000, 60000, 120000, 600000}, +}, []string{"code", "method", "host", "url"}) + +// shouldSkipBodyLog returns true if the body should not be logged. +// This is useful for endpoints that receive large or sensitive payloads. 
+func shouldSkipBodyLog(method string, path string) bool { + if method == "PUT" && path == "/embeddings" { + return true + } + return false +} + +// Logger logs the details regarding an incoming request +func Logger(urlSanitizer func(_ *gin.Context) string) gin.HandlerFunc { + return func(c *gin.Context) { + startTime := time.Now() + reqID := requestid.Get(c) + buf, err := io.ReadAll(c.Request.Body) + if err != nil { + handler.Error(c, err) + } + rdr1 := io.NopCloser(bytes.NewBuffer(buf)) + // Creating a new Buffer, because rdr1 will be read + rdr2 := io.NopCloser(bytes.NewBuffer(buf)) + + userAgent := c.GetHeader("User-Agent") + clientVersion := c.GetHeader("X-Client-Version") + clientPkg := c.GetHeader("X-Client-Package") + clientIP := network.GetClientIP(c) + reqMethod := c.Request.Method + queryValues, _ := url.ParseQuery(c.Request.URL.RawQuery) + if queryValues.Has("token") { + queryValues.Set("token", "redacted-value") + } + queryParamsForLog := queryValues.Encode() + reqContextLogger := logrus.WithFields(logrus.Fields{ + "client_ip": clientIP, + "client_pkg": clientPkg, + "client_version": clientVersion, + "query": queryParamsForLog, + "req_id": reqID, + "req_method": reqMethod, + "req_uri": c.Request.URL.Path, + "ua": userAgent, + }) + if shouldSkipBodyLog(reqMethod, c.Request.URL.Path) { + reqContextLogger = reqContextLogger.WithField("req_body", "redacted") + } else { + body, err := readBody(rdr1) + if err != nil { + logrus.Error("Error reading body", err) + } + reqContextLogger = reqContextLogger.WithField("req_body", body) + } + reqContextLogger.Info("incoming") + c.Request.Body = rdr2 + // Processing request + c.Next() + statusCode := c.Writer.Status() + latencyTime := time.Since(startTime) + reqURI := urlSanitizer(c) + if reqMethod != http.MethodOptions { + latency.WithLabelValues(strconv.Itoa(statusCode), reqMethod, + c.Request.Host, reqURI). + Observe(float64(latencyTime.Milliseconds())) + } + reqContextLogger.WithFields(logrus.Fields{ + "latency_time": latencyTime, + "h_latency": timeUtil.HumanFriendlyDuration(latencyTime), + "status_code": statusCode, + "user_id": auth.GetUserID(c.Request.Header), + }).Info("outgoing") + } +} + +func readBody(reader io.Reader) (string, error) { + buf := new(bytes.Buffer) + _, err := buf.ReadFrom(reader) + return buf.String(), stacktrace.Propagate(err, "") +} diff --git a/server/pkg/repo/authenticator/entity.go b/server/pkg/repo/authenticator/entity.go new file mode 100644 index 000000000..d9a68e84e --- /dev/null +++ b/server/pkg/repo/authenticator/entity.go @@ -0,0 +1,116 @@ +package authenticator + +import ( + "context" + "database/sql" + "errors" + "fmt" + + model "github.com/ente-io/museum/ente/authenticator" + "github.com/ente-io/stacktrace" + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +// Create inserts a new entry +func (r *Repository) Create(ctx context.Context, userID int64, entry model.CreateEntityRequest) (uuid.UUID, error) { + id := uuid.New() + err := r.DB.QueryRow(`INSERT into authenticator_entity( + id, + user_id, + encrypted_data, + header) VALUES ($1,$2,$3,$4) RETURNING id`, + id, //$1 id + userID, // $2 user_id + entry.EncryptedData, // $3 encrypted_data + entry.Header). 
// $4 header
+		Scan(&id)
+	if err != nil {
+		return id, stacktrace.Propagate(err, "failed to create authenticator entity")
+	}
+	return id, nil
+}
+
+func (r *Repository) Get(ctx context.Context, userID int64, id uuid.UUID) (model.Entity, error) {
+	res := model.Entity{}
+	row := r.DB.QueryRowContext(ctx, `SELECT
+	id, user_id, encrypted_data, header, is_deleted, created_at, updated_at
+	FROM authenticator_entity
+	WHERE id = $1 AND
+	user_id = $2`,
+		id,     // $1
+		userID, // $2
+	)
+	err := row.Scan(&res.ID, &res.UserID, &res.EncryptedData, &res.Header, &res.IsDeleted, &res.CreatedAt, &res.UpdatedAt)
+	if err != nil {
+		return model.Entity{}, stacktrace.Propagate(err, "failed to get authenticator entity")
+	}
+	return res, nil
+}
+
+func (r *Repository) Delete(ctx context.Context, userID int64, id uuid.UUID) (bool, error) {
+	_, err := r.DB.ExecContext(ctx,
+		`UPDATE authenticator_entity SET is_deleted = true, encrypted_data = NULL, header = NULL where id=$1 and user_id = $2`,
+		id, userID)
+	if err != nil {
+		return false, stacktrace.Propagate(err, fmt.Sprintf("failed to delete authenticator entity with id=%s", id))
+	}
+	return true, nil
+}
+
+func (r *Repository) Update(ctx context.Context, userID int64, req model.UpdateEntityRequest) error {
+	result, err := r.DB.ExecContext(ctx,
+		`UPDATE authenticator_entity SET encrypted_data = $1, header = $2 where id=$3 and user_id = $4 and is_deleted = FALSE`,
+		req.EncryptedData, req.Header, req.ID, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if affected != 1 {
+		return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
+	}
+	return nil
+}
+
+// GetDiff returns the authenticator entities that have been added or
+// modified after the given sinceTime
+func (r *Repository) GetDiff(ctx context.Context, userID int64, sinceTime int64, limit int16) ([]model.Entity, error) {
+	rows, err := r.DB.QueryContext(ctx, `SELECT
+	id, user_id, encrypted_data, header, is_deleted, created_at, updated_at
+	FROM authenticator_entity
+	WHERE user_id = $1
+	and updated_at > $2
+	ORDER BY updated_at
+	LIMIT $3`,
+		userID,    // $1
+		sinceTime, // $2
+		limit,     // $3
+	)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "GetDiff query failed")
+	}
+	return convertRowsToTotpEntity(rows)
+}
+
+func convertRowsToTotpEntity(rows *sql.Rows) ([]model.Entity, error) {
+	defer func() {
+		if err := rows.Close(); err != nil {
+			logrus.Error(err)
+		}
+	}()
+	result := make([]model.Entity, 0)
+	for rows.Next() {
+		entity := model.Entity{}
+		err := rows.Scan(
+			&entity.ID, &entity.UserID, &entity.EncryptedData, &entity.Header, &entity.IsDeleted,
+			&entity.CreatedAt, &entity.UpdatedAt)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to convert rows to authenticator entities")
+		}
+		result = append(result, entity)
+	}
+	return result, nil
+}
diff --git a/server/pkg/repo/authenticator/key.go b/server/pkg/repo/authenticator/key.go
new file mode 100644
index 000000000..879a7699a
--- /dev/null
+++ b/server/pkg/repo/authenticator/key.go
@@ -0,0 +1,41 @@
+package authenticator
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+
+	"github.com/ente-io/museum/ente"
+	model "github.com/ente-io/museum/ente/authenticator"
+	"github.com/ente-io/stacktrace"
+)
+
+// CreateKey inserts a new authenticator key entry
+func (r *Repository) CreateKey(ctx context.Context, userID int64, entry model.CreateKeyRequest) error {
+	_, err := r.DB.ExecContext(ctx,
+		`INSERT into authenticator_key(
+		user_id,
+		encrypted_key,
+		header) VALUES ($1,$2,$3)`,
+		userID,             // $1 user_id
+		entry.EncryptedKey, // $2 encrypted_key
+		entry.Header)
+
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to create authenticator key")
+	}
+	return nil
+}
+
+func (r *Repository) GetKey(ctx context.Context, userID int64) (model.Key, error) {
+	row := r.DB.QueryRowContext(ctx, `SELECT user_id, encrypted_key, header,
+	created_at from authenticator_key where user_id = $1`, userID)
+	var result model.Key
+	err := row.Scan(&result.UserID, &result.EncryptedKey, &result.Header, &result.CreatedAt)
+	if err != nil {
+		if errors.Is(err, sql.ErrNoRows) {
+			return result, stacktrace.Propagate(&ente.ErrNotFoundError, "authKey is not present")
+		}
+		return result, stacktrace.Propagate(err, "failed to fetch authKey")
+	}
+	return result, nil
+}
diff --git a/server/pkg/repo/authenticator/repository.go b/server/pkg/repo/authenticator/repository.go
new file mode 100644
index 000000000..ca2149a12
--- /dev/null
+++ b/server/pkg/repo/authenticator/repository.go
@@ -0,0 +1,11 @@
+package authenticator
+
+import (
+	"database/sql"
+)
+
+// Repository defines the methods for inserting, updating and retrieving
+// authenticator related keys and entities from the underlying repository
+type Repository struct {
+	DB *sql.DB
+}
diff --git a/server/pkg/repo/billing.go b/server/pkg/repo/billing.go
new file mode 100644
index 000000000..12ca041e0
--- /dev/null
+++ b/server/pkg/repo/billing.go
@@ -0,0 +1,126 @@
+package repo
+
+import (
+	"database/sql"
+	"encoding/json"
+
+	"github.com/ente-io/stacktrace"
+
+	"github.com/awa/go-iap/appstore"
+	"github.com/awa/go-iap/playstore"
+	"github.com/ente-io/museum/ente"
+	"google.golang.org/api/androidpublisher/v3"
+)
+
+// BillingRepository defines the methods for inserting, updating and retrieving
+// billing related entities from the underlying repository
+type BillingRepository struct {
+	DB *sql.DB
+}
+
+// AddSubscription adds a subscription against a userID
+func (repo *BillingRepository) AddSubscription(s ente.Subscription) (int64, error) {
+	var subscriptionID int64
+	err := repo.DB.QueryRow(`INSERT INTO subscriptions(user_id, storage, original_transaction_id, expiry_time, product_id, payment_provider, attributes)
+	VALUES($1, $2, $3, $4, $5, $6, $7)
+	RETURNING subscription_id`, s.UserID, s.Storage,
+		s.OriginalTransactionID, s.ExpiryTime, s.ProductID, s.PaymentProvider,
+		s.Attributes).Scan(&subscriptionID)
+	return subscriptionID, stacktrace.Propagate(err, "")
+}
+
+// UpdateSubscriptionExpiryTime updates the expiryTime of a subscription
+func (repo *BillingRepository) UpdateSubscriptionExpiryTime(subscriptionID int64, expiryTime int64) error {
+	_, err := repo.DB.Exec(`UPDATE subscriptions SET expiry_time = $1 WHERE subscription_id = $2`, expiryTime, subscriptionID)
+	return stacktrace.Propagate(err, "")
+}
+
+// UpdateSubscriptionCancellationStatus updates whether the user's subscription will be cancelled at period end
+func (repo *BillingRepository) UpdateSubscriptionCancellationStatus(userID int64, status bool) error {
+	_, err := repo.DB.Exec(`UPDATE subscriptions SET attributes = jsonb_set(attributes, '{isCancelled}', $1::jsonb) WHERE user_id = $2`, status, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetUserSubscription returns the last created subscription for a userID
+func (repo *BillingRepository) GetUserSubscription(userID int64) (ente.Subscription, error) {
+	var s ente.Subscription
+	row := repo.DB.QueryRow(`SELECT subscription_id,
+	user_id, product_id, storage, original_transaction_id, expiry_time, payment_provider, attributes FROM subscriptions WHERE user_id = $1`, userID)
+	err := row.Scan(&s.ID, &s.UserID, &s.ProductID, &s.Storage, &s.OriginalTransactionID, &s.ExpiryTime, &s.PaymentProvider, &s.Attributes)
+	return s, stacktrace.Propagate(err, "")
+}
+
+// GetSubscriptionForTransaction returns the subscription for a transactionID within a paymentProvider
+func (repo *BillingRepository) GetSubscriptionForTransaction(transactionID string, paymentProvider ente.PaymentProvider) (ente.Subscription, error) {
+	var s ente.Subscription
+	row := repo.DB.QueryRow(`SELECT subscription_id, user_id, product_id, storage, original_transaction_id, expiry_time, payment_provider, attributes FROM subscriptions WHERE original_transaction_id = $1 AND payment_provider = $2`, transactionID, paymentProvider)
+	err := row.Scan(&s.ID, &s.UserID, &s.ProductID, &s.Storage, &s.OriginalTransactionID, &s.ExpiryTime, &s.PaymentProvider, &s.Attributes)
+	return s, stacktrace.Propagate(err, "")
+}
+
+// UpdateTransactionIDOnDeletion prepends `userID:` to the original transaction ID on account deletion.
+// This ensures that a subscription update isn't accidentally applied to the deleted account, and
+// that the user can reuse the same subscription in a different ente account.
+func (repo *BillingRepository) UpdateTransactionIDOnDeletion(userID int64) error {
+	_, err := repo.DB.Exec(`update subscriptions SET original_transaction_id = user_id || ':' || original_transaction_id where original_transaction_id is not NULL and user_id= $1`, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// ReplaceSubscription replaces a subscription with a new one
+func (repo *BillingRepository) ReplaceSubscription(subscriptionID int64, s ente.Subscription) error {
+	_, err := repo.DB.Exec(`UPDATE subscriptions
+	SET storage = $2, original_transaction_id = $3, expiry_time = $4, product_id = $5, payment_provider = $6, attributes = $7
+	WHERE subscription_id = $1`,
+		subscriptionID, s.Storage, s.OriginalTransactionID, s.ExpiryTime, s.ProductID, s.PaymentProvider, s.Attributes)
+	return stacktrace.Propagate(err, "")
+}
+
+// UpdateSubscription updates a subscription
+func (repo *BillingRepository) UpdateSubscription(
+	subscriptionID int64,
+	storage int64,
+	paymentProvider ente.PaymentProvider,
+	transactionID string,
+	productID string,
+	expiryTime int64,
+) error {
+	_, err := repo.DB.Exec(`UPDATE subscriptions
+	SET storage = $2, original_transaction_id = $3, expiry_time = $4, product_id = $5, payment_provider = $6
+	WHERE subscription_id = $1`,
+		subscriptionID, storage, transactionID, expiryTime, productID, paymentProvider)
+	return stacktrace.Propagate(err, "")
+}
+
+// LogPlayStorePush logs a notification from PlayStore
+func (repo *BillingRepository) LogPlayStorePush(userID int64, notification playstore.DeveloperNotification, verificationResponse androidpublisher.SubscriptionPurchase) error {
+	notificationJSON, _ := json.Marshal(notification)
+	responseJSON, _ := json.Marshal(verificationResponse)
+	_, err := repo.DB.Exec(`INSERT INTO subscription_logs(user_id, payment_provider, notification, verification_response) VALUES($1, $2, $3, $4)`,
+		userID, ente.PlayStore, notificationJSON, responseJSON)
+	return stacktrace.Propagate(err, "")
+}
+
+// LogAppStorePush logs a notification from AppStore
+func (repo *BillingRepository) LogAppStorePush(userID int64, notification appstore.SubscriptionNotification, verificationResponse
appstore.IAPResponse) error {
+	notificationJSON, _ := json.Marshal(notification)
+	responseJSON, _ := json.Marshal(verificationResponse)
+	_, err := repo.DB.Exec(`INSERT INTO subscription_logs(user_id, payment_provider, notification, verification_response) VALUES($1, $2, $3, $4)`,
+		userID, ente.AppStore, notificationJSON, responseJSON)
+	return stacktrace.Propagate(err, "")
+}
+
+// LogStripePush logs a notification from Stripe
+func (repo *BillingRepository) LogStripePush(eventLog ente.StripeEventLog) error {
+	notificationJSON, _ := json.Marshal(eventLog.Event)
+	responseJSON, _ := json.Marshal(eventLog.StripeSubscription)
+	_, err := repo.DB.Exec(`INSERT INTO subscription_logs(user_id, payment_provider, notification, verification_response) VALUES($1, $2, $3, $4)`,
+		eventLog.UserID, ente.Stripe, notificationJSON, responseJSON)
+	return stacktrace.Propagate(err, "")
+}
+
+// LogAdminTriggeredSubscriptionUpdate logs a subscription modification by an admin
+func (repo *BillingRepository) LogAdminTriggeredSubscriptionUpdate(r ente.UpdateSubscriptionRequest) error {
+	requestJSON, _ := json.Marshal(r)
+	_, err := repo.DB.Exec(`INSERT INTO subscription_logs(user_id, payment_provider, notification, verification_response) VALUES($1, $2, $3, '{}'::json)`,
+		r.UserID, r.PaymentProvider, requestJSON)
+	return stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/cast/repo.go b/server/pkg/repo/cast/repo.go
new file mode 100644
index 000000000..306c1d481
--- /dev/null
+++ b/server/pkg/repo/cast/repo.go
@@ -0,0 +1,117 @@
+package cast
+
+import (
+	"context"
+	"database/sql"
+	"strings"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/random"
+	"github.com/ente-io/stacktrace"
+	"github.com/google/uuid"
+)
+
+type Repository struct {
+	DB *sql.DB
+}
+
+func (r *Repository) AddCode(ctx context.Context, code *string, pubKey string) (string, error) {
+	var codeValue string
+	var err error
+	if code == nil || *code == "" {
+		codeValue, err = random.GenerateSixDigitOtp()
+		if err != nil {
+			return "", stacktrace.Propagate(err, "")
+		}
+	} else {
+		codeValue = strings.TrimSpace(*code)
+	}
+	_, err = r.DB.ExecContext(ctx, "INSERT INTO casting (code, public_key, id) VALUES ($1, $2, $3)", codeValue, pubKey, uuid.New())
+	if err != nil {
+		return "", err
+	}
+	return codeValue, nil
+}
+
+// InsertCastData sets the collection_id, cast_user, token and encrypted_payload for the
+// given code, provided the code has not been deleted
+func (r *Repository) InsertCastData(ctx context.Context, castUserID int64, code string, collectionID int64, castToken string, encryptedPayload string) error {
+	_, err := r.DB.ExecContext(ctx, "UPDATE casting SET collection_id = $1, cast_user = $2, token = $3, encrypted_payload = $4 WHERE code = $5 and is_deleted=false", collectionID, castUserID, castToken, encryptedPayload, code)
+	return err
+}
+
+func (r *Repository) GetPubKey(ctx context.Context, code string) (string, error) {
+	var pubKey string
+	row := r.DB.QueryRowContext(ctx, "SELECT public_key FROM casting WHERE code = $1 and is_deleted=false", code)
+	err := row.Scan(&pubKey)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return "", ente.ErrNotFoundError.NewErr("code not found")
+		}
+		return "", err
+	}
+	return pubKey, nil
+}
+
+func (r *Repository) GetEncCastData(ctx context.Context, code string) (*string, error) {
+	var payload sql.NullString
+	row := r.DB.QueryRowContext(ctx, "SELECT encrypted_payload FROM casting WHERE code = $1 and is_deleted=false", code)
+	err := row.Scan(&payload)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, ente.ErrNotFoundError.NewErr("active code not found")
+		}
+		return nil, err
+	}
+	if !payload.Valid {
+		return nil, nil
+	}
+	res := &payload.String
+	return res, nil
+}
+
+func (r *Repository) GetCollectionAndCasterIDForToken(ctx context.Context, token string) (int64, int64, error) {
+	var collection, userID int64
+	row := r.DB.QueryRowContext(ctx, "SELECT collection_id, cast_user FROM casting WHERE token = $1 and is_deleted=false", token)
+	err := row.Scan(&collection, &userID)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return -1, -1, ente.ErrCastPermissionDenied.NewErr("invalid token")
+		}
+		return -1, -1, err
+	}
+	return collection, userID, nil
+}
+
+func (r *Repository) UpdateLastUsedAtForToken(ctx context.Context, token string) error {
+	_, err := r.DB.ExecContext(ctx, "UPDATE casting SET last_used_at = now_utc_micro_seconds() WHERE token = $1", token)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// DeleteOldCodes deletes codes that are not associated with a collection and have
+// not been used since the given time
+func (r *Repository) DeleteOldCodes(ctx context.Context, expiryTime int64) error {
+	_, err := r.DB.ExecContext(ctx, "DELETE FROM casting WHERE last_used_at < $1 and is_deleted=false and collection_id is null", expiryTime)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// RevokeTokenForUser revokes all casting sessions started by the given userID
+func (r *Repository) RevokeTokenForUser(ctx context.Context, userId int64) error {
+	_, err := r.DB.ExecContext(ctx, "UPDATE casting SET is_deleted=true where cast_user=$1", userId)
+	return stacktrace.Propagate(err, "")
+}
+
+// RevokeTokenForCollection revokes all casting sessions for the given collectionID
+func (r *Repository) RevokeTokenForCollection(ctx context.Context, collectionID int64) error {
+	_, err := r.DB.ExecContext(ctx, "UPDATE casting SET is_deleted=true where collection_id=$1", collectionID)
+	return stacktrace.Propagate(err, "")
+}
+
+// RevokeForGivenUserAndCollection revokes the casting session for the given
+// collectionID that was started by the given cast user
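+//
+// A hypothetical call site (for illustration only; the variable names are assumed):
+//
+//	if err := castRepo.RevokeForGivenUserAndCollection(ctx, collectionID, userID); err != nil {
+//		return stacktrace.Propagate(err, "failed to revoke cast session")
+//	}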
+func (r *Repository) RevokeForGivenUserAndCollection(ctx context.Context, collectionID int64, userID int64) error { + _, err := r.DB.ExecContext(ctx, "UPDATE casting SET is_deleted=true where collection_id=$1 and cast_user=$2", collectionID, userID) + return stacktrace.Propagate(err, "") +} diff --git a/server/pkg/repo/collection.go b/server/pkg/repo/collection.go new file mode 100644 index 000000000..952c03aa6 --- /dev/null +++ b/server/pkg/repo/collection.go @@ -0,0 +1,1046 @@ +package repo + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "strings" + t "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/sirupsen/logrus" + + "github.com/ente-io/stacktrace" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/crypto" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/lib/pq" +) + +// CollectionRepository defines the methods for inserting, updating and +// retrieving collection entities from the underlying repository +type CollectionRepository struct { + DB *sql.DB + FileRepo *FileRepository + PublicCollectionRepo *PublicCollectionRepository + TrashRepo *TrashRepository + SecretEncryptionKey []byte + QueueRepo *QueueRepository + LatencyLogger *prometheus.HistogramVec +} + +type SharedCollection struct { + CollectionID int64 + ToUserID int64 + FromUserID int64 +} + +// Create creates a collection +func (repo *CollectionRepository) Create(c ente.Collection) (ente.Collection, error) { + + // Check if the app type can create collection + if !ente.App(c.App).IsValidForCollection() { + return ente.Collection{}, ente.ErrInvalidApp + } + + err := repo.DB.QueryRow(`INSERT INTO collections(owner_id, encrypted_key, key_decryption_nonce, name, encrypted_name, name_decryption_nonce, type, attributes, updation_time, magic_metadata, pub_magic_metadata, app) + VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12) RETURNING collection_id`, + c.Owner.ID, c.EncryptedKey, c.KeyDecryptionNonce, c.Name, c.EncryptedName, c.NameDecryptionNonce, c.Type, c.Attributes, c.UpdationTime, c.MagicMetadata, c.PublicMagicMetadata, c.App).Scan(&c.ID) + if err != nil { + if err.Error() == "pq: duplicate key value violates unique constraint \"collections_favorites_constraint_index\"" { + return ente.Collection{}, ente.ErrFavoriteCollectionAlreadyExist + } else if err.Error() == "pq: duplicate key value violates unique constraint \"collections_uncategorized_constraint_index_v2\"" { + return ente.Collection{}, ente.ErrUncategorizeCollectionAlreadyExists + } + } + return c, stacktrace.Propagate(err, "") +} + +// Get returns a collection identified by the collectionID +func (repo *CollectionRepository) Get(collectionID int64) (ente.Collection, error) { + row := repo.DB.QueryRow(`SELECT collection_id, owner_id, encrypted_key, key_decryption_nonce, name, encrypted_name, name_decryption_nonce, type, attributes, updation_time, is_deleted, magic_metadata, pub_magic_metadata + FROM collections + WHERE collection_id = $1`, collectionID) + var c ente.Collection + var name, encryptedName, nameDecryptionNonce sql.NullString + if err := row.Scan(&c.ID, &c.Owner.ID, &c.EncryptedKey, &c.KeyDecryptionNonce, &name, &encryptedName, &nameDecryptionNonce, &c.Type, &c.Attributes, &c.UpdationTime, &c.IsDeleted, &c.MagicMetadata, &c.PublicMagicMetadata); err != nil { + return c, stacktrace.Propagate(err, "") + } + if name.Valid && len(name.String) > 0 { + c.Name = name.String + } else { + c.EncryptedName = encryptedName.String + c.NameDecryptionNonce = nameDecryptionNonce.String 
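+		// Collections created before the encrypted-name migration carry a
+		// plaintext name; newer ones store only encrypted_name and its nonce.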
+ } + urlMap, err := repo.PublicCollectionRepo.GetCollectionToActivePublicURLMap(context.Background(), []int64{collectionID}) + if err != nil { + return ente.Collection{}, stacktrace.Propagate(err, "failed to get publicURL info") + } + if publicUrls, ok := urlMap[collectionID]; ok { + c.PublicURLs = publicUrls + } + return c, nil +} +func (repo *CollectionRepository) GetCollectionByType(userID int64, collectionType string) (ente.Collection, error) { + row := repo.DB.QueryRow(`SELECT collection_id, owner_id, encrypted_key, key_decryption_nonce, name, encrypted_name, name_decryption_nonce, type, attributes, updation_time, is_deleted, magic_metadata + FROM collections + WHERE owner_id = $1 and type = $2`, userID, collectionType) + var c ente.Collection + var name, encryptedName, nameDecryptionNonce sql.NullString + if err := row.Scan(&c.ID, &c.Owner.ID, &c.EncryptedKey, &c.KeyDecryptionNonce, &name, &encryptedName, &nameDecryptionNonce, &c.Type, &c.Attributes, &c.UpdationTime, &c.IsDeleted, &c.MagicMetadata); err != nil { + return c, stacktrace.Propagate(err, "") + } + if name.Valid && len(name.String) > 0 { + c.Name = name.String + } else { + c.EncryptedName = encryptedName.String + c.NameDecryptionNonce = nameDecryptionNonce.String + } + return c, nil +} + +// GetCollectionsOwnedByUser returns the list of collections that a user owns +// todo: refactor this method +func (repo *CollectionRepository) GetCollectionsOwnedByUser(userID int64, updationTime int64, app ente.App) ([]ente.Collection, error) { + rows, err := repo.DB.Query(` + SELECT collections.collection_id, collections.owner_id, collections.encrypted_key, collections.key_decryption_nonce, collections.name, collections.encrypted_name, collections.name_decryption_nonce, collections.type, collections.app, collections.attributes, collections.updation_time, collections.is_deleted, collections.magic_metadata, collections.pub_magic_metadata + FROM collections + WHERE collections.owner_id = $1 AND collections.updation_time > $2 AND app = $3`, userID, updationTime, strings.ToLower(string(app))) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + collectionIDs := make([]int64, 0) + collections := make([]ente.Collection, 0) + result := make([]ente.Collection, 0) + for rows.Next() { + var c ente.Collection + var name, encryptedName, nameDecryptionNonce sql.NullString + if err := rows.Scan(&c.ID, &c.Owner.ID, &c.EncryptedKey, &c.KeyDecryptionNonce, &name, &encryptedName, &nameDecryptionNonce, &c.Type, &c.App, &c.Attributes, &c.UpdationTime, &c.IsDeleted, &c.MagicMetadata, &c.PublicMagicMetadata); err != nil { + return collections, stacktrace.Propagate(err, "") + } + if name.Valid && len(name.String) > 0 { + c.Name = name.String + } else { + c.EncryptedName = encryptedName.String + c.NameDecryptionNonce = nameDecryptionNonce.String + } + // TODO: Pull this information in the previous query + sharees, err := repo.GetSharees(c.ID) + if err != nil { + return collections, stacktrace.Propagate(err, "") + } + c.Sharees = sharees + collections = append(collections, c) + collectionIDs = append(collectionIDs, c.ID) + } + + urlMap, err := repo.PublicCollectionRepo.GetCollectionToActivePublicURLMap(context.Background(), collectionIDs) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get publicURL info") + } + for _, c := range collections { + c.PublicURLs = urlMap[c.ID] + result = append(result, c) + } + + return result, nil +} + +func (repo *CollectionRepository) GetCollectionsOwnedByUserV2(userID 
int64, updationTime int64, app ente.App) ([]ente.Collection, error) { + rows, err := repo.DB.Query(` + SELECT +c.collection_id, c.owner_id, c.encrypted_key,c.key_decryption_nonce, c.name, c.encrypted_name, c.name_decryption_nonce, c.type, c.app, c.attributes, c.updation_time, c.is_deleted, c.magic_metadata, c.pub_magic_metadata, +users.user_id, users.encrypted_email, users.email_decryption_nonce, cs.role_type, +pct.access_token, pct.valid_till, pct.device_limit, pct.created_at, pct.updated_at, pct.pw_hash, pct.pw_nonce, pct.mem_limit, pct.ops_limit, pct.enable_download, pct.enable_collect + FROM collections c + LEFT JOIN collection_shares cs + ON (cs.collection_id = c.collection_id AND cs.is_deleted = false) + LEFT JOIN users + ON (cs.to_user_id = users.user_id AND users.encrypted_email IS NOT NULL) + LEFT JOIN public_collection_tokens pct + ON (pct.collection_id = c.collection_id and pct.is_disabled=FALSE) + WHERE c.owner_id = $1 AND c.updation_time > $2 and c.app = $3`, userID, updationTime, string(app)) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + collectionIDToValMap := map[int64]*ente.Collection{} + addPublicUrlMap := map[string]bool{} + result := make([]ente.Collection, 0) + for rows.Next() { + var c ente.Collection + var name, encryptedName, nameDecryptionNonce sql.NullString + var pctDeviceLimit sql.NullInt32 + var pctEnableDownload, pctEnableCollect sql.NullBool + var shareUserID, pctValidTill, pctCreatedAt, pctUpdatedAt, pctMemLimit, pctOpsLimit sql.NullInt64 + var encryptedEmail, nonce []byte + var shareeRoleType, pctToken, pctPwHash, pctPwNonce sql.NullString + + if err := rows.Scan(&c.ID, &c.Owner.ID, &c.EncryptedKey, &c.KeyDecryptionNonce, &name, &encryptedName, &nameDecryptionNonce, &c.Type, &c.App, &c.Attributes, &c.UpdationTime, &c.IsDeleted, &c.MagicMetadata, &c.PublicMagicMetadata, + &shareUserID, &encryptedEmail, &nonce, &shareeRoleType, + &pctToken, &pctValidTill, &pctDeviceLimit, &pctCreatedAt, &pctUpdatedAt, &pctPwHash, &pctPwNonce, &pctMemLimit, &pctOpsLimit, &pctEnableDownload, &pctEnableCollect); err != nil { + return nil, stacktrace.Propagate(err, "") + } + + if _, ok := collectionIDToValMap[c.ID]; !ok { + if name.Valid && len(name.String) > 0 { + c.Name = name.String + } else { + c.EncryptedName = encryptedName.String + c.NameDecryptionNonce = nameDecryptionNonce.String + } + c.Sharees = make([]ente.CollectionUser, 0) + c.PublicURLs = make([]ente.PublicURL, 0) + collectionIDToValMap[c.ID] = &c + + } + currentCollection := collectionIDToValMap[c.ID] + if shareUserID.Valid { + sharedUser := ente.CollectionUser{ + ID: shareUserID.Int64, + Role: ente.ConvertStringToCollectionParticipantRole(shareeRoleType.String), + } + email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + sharedUser.Email = email + currentCollection.Sharees = append(currentCollection.Sharees, sharedUser) + } + + if pctToken.Valid { + if _, ok := addPublicUrlMap[pctToken.String]; !ok { + addPublicUrlMap[pctToken.String] = true + url := ente.PublicURL{ + URL: fmt.Sprintf(BaseShareURL, pctToken.String), + DeviceLimit: int(pctDeviceLimit.Int32), + ValidTill: pctValidTill.Int64, + EnableDownload: pctEnableDownload.Bool, + EnableCollect: pctEnableCollect.Bool, + PasswordEnabled: pctPwNonce.Valid, + } + if pctPwNonce.Valid { + url.Nonce = &pctPwNonce.String + url.MemLimit = &pctMemLimit.Int64 + url.OpsLimit = &pctOpsLimit.Int64 + } + currentCollection.PublicURLs = 
append(currentCollection.PublicURLs, url) + } + } + } + for _, collection := range collectionIDToValMap { + result = append(result, *collection) + } + return result, nil +} + +// GetCollectionsSharedWithUser returns the list of collections that are shared +// with a user +func (repo *CollectionRepository) GetCollectionsSharedWithUser(userID int64, updationTime int64, app ente.App) ([]ente.Collection, error) { + rows, err := repo.DB.Query(` + SELECT collections.collection_id, collections.owner_id, users.encrypted_email, users.email_decryption_nonce, collection_shares.encrypted_key, collections.name, collections.encrypted_name, collections.name_decryption_nonce, collections.type, collections.app, collections.pub_magic_metadata, collection_shares.magic_metadata, collections.updation_time, collection_shares.is_deleted + FROM collections + INNER JOIN users + ON collections.owner_id = users.user_id + INNER JOIN collection_shares + ON collections.collection_id = collection_shares.collection_id AND collection_shares.to_user_id = $1 AND (collection_shares.updation_time > $2 OR collections.updation_time > $2) AND users.encrypted_email IS NOT NULL AND app = $3`, userID, updationTime, string(app)) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + + collections := make([]ente.Collection, 0) + for rows.Next() { + var c ente.Collection + var collectionName, encryptedName, nameDecryptionNonce sql.NullString + var encryptedEmail, emailDecryptionNonce []byte + if err := rows.Scan(&c.ID, &c.Owner.ID, &encryptedEmail, &emailDecryptionNonce, &c.EncryptedKey, &collectionName, &encryptedName, &nameDecryptionNonce, &c.Type, &c.App, &c.PublicMagicMetadata, &c.SharedMagicMetadata, &c.UpdationTime, &c.IsDeleted); err != nil { + return collections, stacktrace.Propagate(err, "") + } + if collectionName.Valid && len(collectionName.String) > 0 { + c.Name = collectionName.String + } else { + c.EncryptedName = encryptedName.String + c.NameDecryptionNonce = nameDecryptionNonce.String + } + // if collection is unshared, no need to parse owner's email. Email decryption will fail if the owner's account is deleted + if c.IsDeleted { + c.Owner.Email = "" + } else { + email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, emailDecryptionNonce) + if err != nil { + return collections, stacktrace.Propagate(err, "failed to decrypt email") + } + c.Owner.Email = email + } + // TODO: Pull this information in the previous query + if c.IsDeleted { + // if collection is deleted or unshared, c.IsDeleted will be true. 
In both cases, we should not send
+			// back information about other sharees
+			c.Sharees = make([]ente.CollectionUser, 0)
+		} else {
+			sharees, err := repo.GetSharees(c.ID)
+			if err != nil {
+				return collections, stacktrace.Propagate(err, "")
+			}
+			c.Sharees = sharees
+		}
+		collections = append(collections, c)
+	}
+	return collections, nil
+}
+
+// GetCollectionIDsSharedWithUser returns the list of collections that a user has access to
+func (repo *CollectionRepository) GetCollectionIDsSharedWithUser(userID int64) ([]int64, error) {
+	rows, err := repo.DB.Query(`
+	SELECT collection_id
+	FROM collection_shares
+	WHERE collection_shares.to_user_id = $1
+	AND collection_shares.is_deleted = $2`, userID, false)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+
+	cIDs := make([]int64, 0)
+	for rows.Next() {
+		var cID int64
+		if err := rows.Scan(&cID); err != nil {
+			return cIDs, stacktrace.Propagate(err, "")
+		}
+		cIDs = append(cIDs, cID)
+	}
+	return cIDs, nil
+}
+
+// GetCollectionIDsOwnedByUser returns the map of collectionID (owned by user) to collection deletion status
+func (repo *CollectionRepository) GetCollectionIDsOwnedByUser(userID int64) (map[int64]bool, error) {
+	rows, err := repo.DB.Query(`
+	SELECT collection_id, is_deleted
+	FROM collections
+	WHERE owner_id = $1
+	AND is_deleted = $2`, userID, false)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+
+	result := make(map[int64]bool, 0)
+
+	for rows.Next() {
+		var cID int64
+		var isDeleted bool
+		if err := rows.Scan(&cID, &isDeleted); err != nil {
+			return result, stacktrace.Propagate(err, "")
+		}
+		result[cID] = isDeleted
+	}
+	return result, nil
+}
+
+// GetAllSharedCollections returns the list of SharedCollections in which the given user is involved
+func (repo *CollectionRepository) GetAllSharedCollections(ctx context.Context, userID int64) ([]SharedCollection, error) {
+	rows, err := repo.DB.QueryContext(ctx, `SELECT collection_id, to_user_id, from_user_id
+	FROM collection_shares
+	WHERE (to_user_id = $1 or from_user_id = $1)
+	AND is_deleted = $2`, userID, false)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	result := make([]SharedCollection, 0)
+	for rows.Next() {
+		logrus.Info("reading row")
+		var sharedCollection SharedCollection
+		if err := rows.Scan(&sharedCollection.CollectionID, &sharedCollection.ToUserID, &sharedCollection.FromUserID); err != nil {
+			logrus.WithError(err).Info("failed to scan")
+			return result, stacktrace.Propagate(err, "")
+		}
+		result = append(result, sharedCollection)
+	}
+	return result, nil
+}
+
+// DoesFileExistInCollections returns true if the file exists in one of the
+// provided collections
+func (repo *CollectionRepository) DoesFileExistInCollections(fileID int64, cIDs []int64) (bool, error) {
+	var exists bool
+	err := repo.DB.QueryRow(`SELECT EXISTS (SELECT 1 FROM collection_files WHERE file_id = $1 AND is_deleted = $2 AND collection_id = ANY ($3))`,
+		fileID, false, pq.Array(cIDs)).Scan(&exists)
+	return exists, stacktrace.Propagate(err, "")
+}
+
+// GetCollectionShareeRole returns the sharee's role if the collection is shared with the user
+func (repo *CollectionRepository) GetCollectionShareeRole(cID int64, userID int64) (*ente.CollectionParticipantRole, error) {
+	var role *ente.CollectionParticipantRole
+	err := repo.DB.QueryRow(`(SELECT role_type FROM collection_shares WHERE collection_id = $1 AND to_user_id = $2 AND is_deleted = $3)`,
+		cID, userID, false).Scan(&role)
+	return role, stacktrace.Propagate(err, "")
+}
+
+func (repo *CollectionRepository) GetOwnerID(collectionID int64) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT owner_id FROM collections WHERE collection_id = $1`, collectionID)
+	var ownerID int64
+	err := row.Scan(&ownerID)
+	return ownerID, stacktrace.Propagate(err, "failed to get collection owner")
+}
+
+// GetCollectionsFilesCount returns the number of non-deleted files which are present in the given collection
+func (repo *CollectionRepository) GetCollectionsFilesCount(collectionID int64) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT count(*) FROM collection_files WHERE collection_id=$1 AND is_deleted = false`, collectionID)
+	var count int64 = 0
+	err := row.Scan(&count)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return count, nil
+}
+
+// Share shares a collection with a userID
+func (repo *CollectionRepository) Share(
+	collectionID int64,
+	fromUserID int64,
+	toUserID int64,
+	encryptedKey string,
+	role ente.CollectionParticipantRole,
+	updationTime int64) error {
+	// validate the role before opening a transaction, so we never leave one dangling
+	if role != ente.VIEWER && role != ente.COLLABORATOR {
+		err := fmt.Errorf("invalid role %s", string(role))
+		return stacktrace.Propagate(err, "")
+	}
+	context := context.Background()
+	tx, err := repo.DB.BeginTx(context, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(context, `INSERT INTO collection_shares(collection_id, from_user_id, to_user_id, encrypted_key, updation_time, role_type) VALUES($1, $2, $3, $4, $5, $6)
+	ON CONFLICT (collection_id, from_user_id, to_user_id)
+	DO UPDATE SET(is_deleted, updation_time, role_type) = (FALSE, $5, $6)`,
+		collectionID, fromUserID, toUserID, encryptedKey, updationTime, role)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(context, `UPDATE collections SET updation_time = $1 WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+// UpdateShareeMetadata updates the sharee's magic metadata for a shared collection
+func (repo *CollectionRepository) UpdateShareeMetadata(
+	collectionID int64,
+	ownerUserID int64,
+	shareeUserID int64,
+	metadata ente.MagicMetadata,
+	updationTime int64) error {
+	context := context.Background()
+	tx, err := repo.DB.BeginTx(context, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	// Update collection_shares metadata if the collection is not deleted
+	sqlResult, err := tx.ExecContext(context, `UPDATE collection_shares SET magic_metadata = $1, updation_time = $2 WHERE collection_id = $3 AND from_user_id = $4 AND to_user_id = $5 AND is_deleted = $6`,
+		metadata, updationTime, collectionID, ownerUserID, shareeUserID, false)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	// verify that only one row is affected
+	affected, err := sqlResult.RowsAffected()
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	if affected != 1 {
+		tx.Rollback()
+		err = fmt.Errorf("invalid number of rows affected %d", affected)
+		return stacktrace.Propagate(err, "")
+	}
+
+	_, err = tx.ExecContext(context, `UPDATE collections SET updation_time = $1 WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+// UnShare un-shares a collection from a userID
+func (repo *CollectionRepository) UnShare(collectionID int64, toUserID int64) error {
+	updationTime := time.Microseconds()
+	context := context.Background()
+	tx, err := repo.DB.BeginTx(context, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(context, `UPDATE collection_shares
+	SET is_deleted = $1, updation_time = $2
+	WHERE collection_id = $3 AND to_user_id = $4`, true, updationTime, collectionID, toUserID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	// remove all the files which were added by this user
+	// todo: should we also add c_owner_id != toUserId
+	_, err = tx.ExecContext(context, `UPDATE collection_files
+	SET is_deleted = $1, updation_time = $2
+	WHERE collection_id = $3 AND f_owner_id = $4`, true, updationTime, collectionID, toUserID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+
+	_, err = tx.ExecContext(context, `UPDATE collections SET updation_time = $1
+	WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+// AddFiles adds files to a collection
+func (repo *CollectionRepository) AddFiles(
+	collectionID int64,
+	collectionOwnerID int64,
+	files []ente.CollectionFileItem,
+	fileOwnerID int64,
+) error {
+	updationTime := time.Microseconds()
+	context := context.Background()
+	tx, err := repo.DB.BeginTx(context, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	for _, file := range files {
+		_, err := tx.ExecContext(context, `INSERT INTO collection_files
+		(collection_id, file_id, encrypted_key, key_decryption_nonce, is_deleted, updation_time, c_owner_id, f_owner_id)
+		VALUES($1, $2, $3, $4, $5, $6, $7, $8)
+		ON CONFLICT ON CONSTRAINT unique_collection_files_cid_fid
+		DO UPDATE SET(is_deleted, updation_time) = ($5, $6)`, collectionID, file.ID, file.EncryptedKey,
+			file.KeyDecryptionNonce, false, updationTime, collectionOwnerID, fileOwnerID)
+		if err != nil {
+			tx.Rollback()
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	_, err = tx.ExecContext(context, `UPDATE collections SET updation_time = $1
+	WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+func (repo *CollectionRepository) RestoreFiles(ctx context.Context, userID int64, collectionID int64, newCollectionFiles []ente.CollectionFileItem) error {
+	fileIDs := make([]int64, 0)
+	for _, newFile := range newCollectionFiles {
+		fileIDs = append(fileIDs, newFile.ID)
+	}
+	// verify that all files are restorable
+	_, canRestoreAllFiles, err := repo.TrashRepo.GetFilesInTrashState(ctx, userID, fileIDs)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if !canRestoreAllFiles {
+		return stacktrace.Propagate(ente.ErrBadRequest, "some fileIDs are not restorable")
+	}
+
+	tx, err := repo.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	updationTime := time.Microseconds()
+
+	for _, file := range newCollectionFiles {
+		_, err := tx.ExecContext(ctx, `INSERT INTO collection_files
+		(collection_id, file_id, encrypted_key, key_decryption_nonce, is_deleted, updation_time, c_owner_id, f_owner_id)
+		VALUES($1, $2, $3, $4, $5, $6, $7, $8)
+		ON CONFLICT ON CONSTRAINT unique_collection_files_cid_fid
+		DO UPDATE SET(is_deleted, updation_time) = ($5, $6)`, collectionID, file.ID,
+			file.EncryptedKey,
+			file.KeyDecryptionNonce, false, updationTime, userID, userID)
+		if err != nil {
+			tx.Rollback()
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	_, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+	WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+
+	_, err = tx.ExecContext(ctx, `UPDATE trash SET is_restored = true
+	WHERE user_id = $1 and file_id = ANY ($2)`, userID, pq.Array(fileIDs))
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	return tx.Commit()
+}
+
+// RemoveFilesV3 just removes the entries from the collection. This method assumes that the
+// collection owner is different from the file owners
+func (repo *CollectionRepository) RemoveFilesV3(context context.Context, collectionID int64, fileIDs []int64) error {
+	updationTime := time.Microseconds()
+	tx, err := repo.DB.BeginTx(context, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(context, `UPDATE collection_files
+	SET is_deleted = $1, updation_time = $2 WHERE collection_id = $3 AND file_id = ANY($4)`,
+		true, updationTime, collectionID, pq.Array(fileIDs))
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(context, `UPDATE collections SET updation_time = $1
+	WHERE collection_id = $2`, updationTime, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+// MoveFiles moves files from one collection to another
+func (repo *CollectionRepository) MoveFiles(ctx context.Context,
+	toCollectionID int64, fromCollectionID int64,
+	fileItems []ente.CollectionFileItem,
+	collectionOwner int64,
+	fileOwner int64,
+) error {
+	if collectionOwner != fileOwner {
+		return fmt.Errorf("move is not supported when collection and file owner are different")
+	}
+	updationTime := time.Microseconds()
+	tx, err := repo.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	fileIDs := make([]int64, 0)
+	for _, file := range fileItems {
+		fileIDs = append(fileIDs, file.ID)
+		_, err := tx.ExecContext(ctx, `INSERT INTO collection_files
+		(collection_id, file_id, encrypted_key, key_decryption_nonce, is_deleted, updation_time, c_owner_id, f_owner_id)
+		VALUES($1, $2, $3, $4, $5, $6, $7, $8)
+		ON CONFLICT ON CONSTRAINT unique_collection_files_cid_fid
+		DO UPDATE SET(is_deleted, updation_time) = ($5, $6)`, toCollectionID, file.ID, file.EncryptedKey,
+			file.KeyDecryptionNonce, false, updationTime, collectionOwner, fileOwner)
+		if err != nil {
+			if rollbackErr := tx.Rollback(); rollbackErr != nil {
+				logrus.WithError(rollbackErr).Error("transaction rollback failed")
+				return stacktrace.Propagate(rollbackErr, "")
+			}
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	_, err = tx.ExecContext(ctx, `UPDATE collection_files
+	SET is_deleted = $1, updation_time = $2 WHERE collection_id = $3 AND file_id = ANY($4)`,
+		true, updationTime, fromCollectionID, pq.Array(fileIDs))
+	if err != nil {
+		if rollbackErr := tx.Rollback(); rollbackErr != nil {
+			logrus.WithError(rollbackErr).Error("transaction rollback failed")
+			return stacktrace.Propagate(rollbackErr, "")
+		}
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+	WHERE (collection_id = $2 or collection_id = $3 )`, updationTime, toCollectionID, fromCollectionID)
+	if err != nil
{ + if rollbackErr := tx.Rollback(); rollbackErr != nil { + logrus.WithError(rollbackErr).Error("transaction rollback failed") + return stacktrace.Propagate(rollbackErr, "") + } + return stacktrace.Propagate(err, "") + } + return tx.Commit() +} + +// GetDiff returns the diff of files added or modified within a collection since +// the specified time +func (repo *CollectionRepository) GetDiff(collectionID int64, sinceTime int64, limit int) ([]ente.File, error) { + startTime := t.Now() + defer func() { + repo.LatencyLogger.WithLabelValues("CollectionRepo.GetDiff"). + Observe(float64(t.Since(startTime).Milliseconds())) + }() + rows, err := repo.DB.Query(` + SELECT files.file_id, files.owner_id, collection_files.collection_id, collection_files.c_owner_id, + collection_files.encrypted_key, collection_files.key_decryption_nonce, + files.file_decryption_header, files.thumbnail_decryption_header, + files.metadata_decryption_header, files.encrypted_metadata, files.magic_metadata, files.pub_magic_metadata, + files.info, collection_files.is_deleted, collection_files.updation_time + FROM files + INNER JOIN collection_files + ON collection_files.file_id = files.file_id + AND collection_files.collection_id = $1 + AND collection_files.updation_time > $2 + ORDER BY collection_files.updation_time LIMIT $3`, + collectionID, sinceTime, limit) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return convertRowsToFiles(rows) +} + +func (repo *CollectionRepository) GetFilesWithVersion(collectionID int64, updateAtTime int64) ([]ente.File, error) { + startTime := t.Now() + defer func() { + repo.LatencyLogger.WithLabelValues("CollectionRepo.GetFilesWithVersion"). + Observe(float64(t.Since(startTime).Milliseconds())) + }() + rows, err := repo.DB.Query(` + SELECT files.file_id, files.owner_id, collection_files.collection_id, collection_files.c_owner_id, + collection_files.encrypted_key, collection_files.key_decryption_nonce, + files.file_decryption_header, files.thumbnail_decryption_header, + files.metadata_decryption_header, files.encrypted_metadata, files.magic_metadata, files.pub_magic_metadata, + files.info, collection_files.is_deleted, collection_files.updation_time + FROM files + INNER JOIN collection_files + ON collection_files.file_id = files.file_id + AND collection_files.collection_id = $1 + AND collection_files.updation_time = $2`, + collectionID, updateAtTime) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return convertRowsToFiles(rows) +} + +func (repo *CollectionRepository) GetFile(collectionID int64, fileID int64) ([]ente.File, error) { + rows, err := repo.DB.Query(` + SELECT files.file_id, files.owner_id, collection_files.collection_id, collection_files.c_owner_id, + collection_files.encrypted_key, collection_files.key_decryption_nonce, + files.file_decryption_header, files.thumbnail_decryption_header, + files.metadata_decryption_header, files.encrypted_metadata, files.magic_metadata, files.pub_magic_metadata, + files.info, collection_files.is_deleted, collection_files.updation_time + FROM files + INNER JOIN collection_files + ON collection_files.file_id = files.file_id + AND collection_files.collection_id = $1 + AND collection_files.file_id = $2`, + collectionID, fileID) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + files, err := convertRowsToFiles(rows) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return files, nil +} + +// GetSharees returns the list of users a collection has been shared with +func (repo 
*CollectionRepository) GetSharees(cID int64) ([]ente.CollectionUser, error) {
+	rows, err := repo.DB.Query(`
+	SELECT users.user_id, users.encrypted_email, users.email_decryption_nonce, collection_shares.role_type
+	FROM users
+	INNER JOIN collection_shares
+	ON (collection_shares.collection_id = $1 AND collection_shares.to_user_id = users.user_id AND collection_shares.is_deleted = $2 AND users.encrypted_email IS NOT NULL)`,
+		cID, false)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+
+	users := make([]ente.CollectionUser, 0)
+	for rows.Next() {
+		var user ente.CollectionUser
+		var encryptedEmail, nonce []byte
+		if err := rows.Scan(&user.ID, &encryptedEmail, &nonce, &user.Role); err != nil {
+			return users, stacktrace.Propagate(err, "")
+		}
+		email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce)
+		if err != nil {
+			return users, stacktrace.Propagate(err, "")
+		}
+		user.Email = email
+		users = append(users, user)
+	}
+	return users, nil
+}
+
+// getCollectionExclusiveFiles returns a list of fileIDs that are exclusive to the collection
+func (repo *CollectionRepository) getCollectionExclusiveFiles(collectionID int64, collectionOwnerID int64) ([]int64, error) {
+	rows, err := repo.DB.Query(`
+	SELECT file_id
+	FROM collection_files
+	WHERE is_deleted=false
+	AND file_id IN (
+		SELECT file_id
+		FROM collection_files
+		WHERE is_deleted=false
+		AND collection_id =$1
+	)
+	AND collection_id IN (SELECT collection_id from collections where owner_id = $2)
+	GROUP BY file_id
+	HAVING COUNT(file_id) = 1`, collectionID, collectionOwnerID)
+	if err != nil {
+		return make([]int64, 0), stacktrace.Propagate(err, "")
+	}
+	return convertRowsToFileId(rows)
+}
+
+// GetCollectionFileIDs returns the list of fileIDs that are currently present in the
+// given collection and are owned by the collection owner
+func (repo *CollectionRepository) GetCollectionFileIDs(collectionID int64, collectionOwnerID int64) ([]int64, error) {
+	// Collaboration Todo: Filter out files which are not owned by the collection owner
+	rows, err := repo.DB.Query(
+		`SELECT file_id
+		FROM collection_files
+		WHERE is_deleted=false
+		AND collection_id =$1 AND (f_owner_id is null or f_owner_id = $2)`, collectionID, collectionOwnerID)
+	if err != nil {
+		return make([]int64, 0), stacktrace.Propagate(err, "")
+	}
+	return convertRowsToFileId(rows)
+}
+
+func convertRowsToFileId(rows *sql.Rows) ([]int64, error) {
+	fileIDs := make([]int64, 0)
+	defer rows.Close()
+	for rows.Next() {
+		var fileID int64
+		if err := rows.Scan(&fileID); err != nil {
+			return fileIDs, stacktrace.Propagate(err, "")
+		}
+		fileIDs = append(fileIDs, fileID)
+	}
+	return fileIDs, nil
+}
+
+// TrashV2 removes an entry in the database for the collection referred to by `collectionID` and moves all files
+// which are exclusive to this collection to trash.
+//
+// Deprecated: please use TrashV3 instead.
+func (repo *CollectionRepository) TrashV2(collectionID int64, userID int64) error {
+	ctx := context.Background()
+	tx, err := repo.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	fileIDs, err := repo.getCollectionExclusiveFiles(collectionID, userID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	items := make([]ente.TrashItemRequest, 0)
+	for _, fileID := range fileIDs {
+		items = append(items, ente.TrashItemRequest{
+			FileID:       fileID,
+			CollectionID: collectionID,
+		})
+	}
+	_, err = tx.ExecContext(ctx, `UPDATE collection_files SET is_deleted = true WHERE collection_id = $1`, collectionID)
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = repo.TrashRepo.InsertItems(ctx, tx, userID, items)
+
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	return tx.Commit()
+}
+
+// TrashV3 moves the files belonging to the collection owner to the trash
+func (repo *CollectionRepository) TrashV3(ctx context.Context, collectionID int64) error {
+	log := logrus.WithFields(logrus.Fields{
+		"deleting_collection": collectionID,
+	})
+	collection, err := repo.Get(collectionID)
+	if err != nil {
+		log.WithError(err).Error("failed to get collection")
+		return stacktrace.Propagate(err, "")
+	}
+	ownerID := collection.Owner.ID
+	fileIDs, err := repo.GetCollectionFileIDs(collectionID, ownerID)
+	if err != nil {
+		log.WithError(err).Error("failed to get fileIDs")
+		return stacktrace.Propagate(err, "")
+	}
+	log.WithField("file_count", len(fileIDs)).Info("Fetched fileIDs")
+	batchSize := 2000
+	for i := 0; i < len(fileIDs); i += batchSize {
+		end := i + batchSize
+		if end > len(fileIDs) {
+			end = len(fileIDs)
+		}
+		batch := fileIDs[i:end]
+		err := repo.FileRepo.VerifyFileOwner(ctx, batch, ownerID, log)
+		if err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		items := make([]ente.TrashItemRequest, 0)
+		for _, fileID := range batch {
+			items = append(items, ente.TrashItemRequest{
+				FileID:       fileID,
+				CollectionID: collectionID,
+			})
+		}
+		// trash only the current batch; the trash items above were built from it
+		err = repo.TrashRepo.TrashFiles(batch, ownerID, ente.TrashRequest{OwnerID: ownerID, TrashItems: items})
+		if err != nil {
+			log.WithError(err).Error("failed to trash file")
+			return stacktrace.Propagate(err, "")
+		}
+	}
+	// Verify that all files are processed in the collection.
+	count, err := repo.GetCollectionsFilesCount(collectionID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if count != 0 {
+		removedFiles, removeErr := repo.removeAllFilesAddedByOthers(collectionID)
+		if removeErr != nil {
+			return stacktrace.Propagate(removeErr, "")
+		}
+		if count != removedFiles {
+			return fmt.Errorf("investigate: collection %d still has %d files which are not deleted", collectionID, count-removedFiles)
+		} else {
+			logrus.WithField("collection_id", collectionID).
+				WithField("file_count", count).
+				WithField("removed_files", removedFiles).
+ Info("All files are removed from the collection") + return nil + } + } + return nil +} + +func (repo *CollectionRepository) removeAllFilesAddedByOthers(collectionID int64) (int64, error) { + var fileIDs []int64 + rows, err := repo.DB.Query(`SELECT file_id FROM collection_files WHERE collection_id = $1 AND is_deleted=false AND f_owner_id IS NOT NULL AND c_owner_id IS NOT NULL AND f_owner_id <> c_owner_id`, collectionID) + if err != nil { + return 0, stacktrace.Propagate(err, "") + } + defer rows.Close() + for rows.Next() { + var fileID int64 + if err := rows.Scan(&fileID); err != nil { + return 0, stacktrace.Propagate(err, "") + } + fileIDs = append(fileIDs, fileID) + } + if len(fileIDs) == 0 { + return 0, nil + } + removeErr := repo.RemoveFilesV3(context.Background(), collectionID, fileIDs) + if removeErr != nil { + return 0, stacktrace.Propagate(removeErr, "") + } + return int64(len(fileIDs)), nil +} + +// ScheduleDelete marks the collection as deleted and queue up an operation to +// move the collection files to user's trash. +// The deleteOnlyExcluiveFiles flag is true for v2 collection delete and is false for v3 version. +// See [Collection Delete Versions] for more details +func (repo *CollectionRepository) ScheduleDelete( + collectionID int64, + deleteOnlyExcluiveFiles bool) error { + updationTime := time.Microseconds() + ctx := context.Background() + tx, err := repo.DB.BeginTx(ctx, nil) + if err != nil { + return stacktrace.Propagate(err, "") + } + _, err = tx.ExecContext(ctx, `UPDATE collection_shares + SET is_deleted = $1, updation_time = $2 + WHERE collection_id = $3`, true, updationTime, collectionID) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + _, err = tx.ExecContext(ctx, `UPDATE collections + SET is_deleted = $1, updation_time = $2 + WHERE collection_id = $3`, true, updationTime, collectionID) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + if deleteOnlyExcluiveFiles { + err = repo.QueueRepo.AddItems(ctx, tx, TrashCollectionQueue, []string{strconv.FormatInt(collectionID, 10)}) + } else { + err = repo.QueueRepo.AddItems(ctx, tx, TrashCollectionQueueV3, []string{strconv.FormatInt(collectionID, 10)}) + } + + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + err = tx.Commit() + return stacktrace.Propagate(err, "") +} + +// Rename updates the collection's name by updating the encrypted_name and name_decryption_nonce of the collection +func (repo *CollectionRepository) Rename(collectionID int64, encryptedName string, nameDecryptionNonce string) error { + updationTime := time.Microseconds() + _, err := repo.DB.Exec(`UPDATE collections + SET encrypted_name = $1, + name_decryption_nonce=$2, + updation_time=$3 + WHERE collection_id = $4`, + encryptedName, nameDecryptionNonce, updationTime, collectionID) + return stacktrace.Propagate(err, "") +} + +// UpdateMagicMetadata updates the magic metadata for the given collection +func (repo *CollectionRepository) UpdateMagicMetadata(ctx context.Context, + collectionID int64, + magicMetadata ente.MagicMetadata, + isPublicMetadata bool, +) error { + updationTime := time.Microseconds() + magicMetadata.Version = magicMetadata.Version + 1 + var err error + if isPublicMetadata { + _, err = repo.DB.ExecContext(ctx, `UPDATE collections + SET pub_magic_metadata = $1, + updation_time=$2 + WHERE collection_id = $3`, + magicMetadata, updationTime, collectionID) + } else { + _, err = repo.DB.ExecContext(ctx, `UPDATE collections + SET magic_metadata = $1, + 
+		updation_time=$2
+		WHERE collection_id = $3`,
+			magicMetadata, updationTime, collectionID)
+	}
+	return stacktrace.Propagate(err, "")
+}
+
+func (repo *CollectionRepository) GetSharedCollectionsCount(userID int64) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT count(*) FROM collection_shares WHERE from_user_id = $1`, userID)
+	var count int64 = 0
+	err := row.Scan(&count)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return count, nil
+}
+
+func (repo *CollectionRepository) GetCollectionCount(fileID int64) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT count(*) FROM collection_files WHERE file_id = $1 and is_deleted = false`, fileID)
+	var count int64 = 0
+	err := row.Scan(&count)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return count, nil
+}
diff --git a/server/pkg/repo/datacleanup/repository.go b/server/pkg/repo/datacleanup/repository.go
new file mode 100644
index 000000000..4870cecf5
--- /dev/null
+++ b/server/pkg/repo/datacleanup/repository.go
@@ -0,0 +1,133 @@
+package datacleanup
+
+import (
+	"context"
+	"database/sql"
+
+	entity "github.com/ente-io/museum/ente/data_cleanup"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+)
+
+// Repository wraps the interactions with the data_cleanup database table
+type Repository struct {
+	DB *sql.DB
+}
+
+func (r *Repository) Insert(ctx context.Context, userID int64) error {
+	_, err := r.DB.ExecContext(ctx, `INSERT INTO data_cleanup(user_id) VALUES ($1)`, userID)
+	return stacktrace.Propagate(err, "failed to insert")
+}
+
+func (r *Repository) GetItemsPendingCompletion(ctx context.Context, limit int) ([]*entity.DataCleanup, error) {
+	rows, err := r.DB.QueryContext(ctx, `SELECT user_id, stage, stage_schedule_time, stage_attempt_count, created_at, updated_at from data_cleanup
+	where stage != $1 and stage_schedule_time < now_utc_micro_seconds()
+	ORDER BY stage_schedule_time LIMIT $2`, entity.Completed, limit)
+	if err != nil {
+		return nil, err
+	}
+	defer rows.Close()
+
+	result := make([]*entity.DataCleanup, 0)
+
+	for rows.Next() {
+		item := entity.DataCleanup{}
+		if err = rows.Scan(&item.UserID, &item.Stage, &item.StageScheduleTime, &item.StageAttemptCount, &item.CreatedAt, &item.UpdatedAt); err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+
+		result = append(result, &item)
+	}
+	return result, nil
+}
+
+// MoveToNextStage updates the stage and its schedule time, and resets the attempt count
+func (r *Repository) MoveToNextStage(ctx context.Context, userID int64, stage entity.Stage, stageScheduleTime int64) error {
+	_, err := r.DB.ExecContext(ctx, `UPDATE data_cleanup SET stage = $1, stage_schedule_time = $2, stage_attempt_count=0
+	WHERE user_id = $3`, stage, stageScheduleTime, userID)
+	return stacktrace.Propagate(err, "failed to update")
+}
+
+// ScheduleNextAttemptAfterNHours bumps the attempt count by one and schedules the next attempt after n hour(s)
+func (r *Repository) ScheduleNextAttemptAfterNHours(ctx context.Context, userID int64, n int8) error {
+	_, err := r.DB.ExecContext(ctx, `UPDATE data_cleanup SET stage_attempt_count = stage_attempt_count +1, stage_schedule_time = $1
+	WHERE user_id = $2`, time.MicrosecondsAfterHours(n), userID)
+	return stacktrace.Propagate(err, "failed to update")
+}
+
+func (r *Repository) DeleteTableData(ctx context.Context, userID int64) error {
+	_, err := r.DB.ExecContext(ctx, `DELETE FROM key_attributes WHERE user_id = $1`, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to delete key attributes data")
data") + } + _, err = r.DB.ExecContext(ctx, `DELETE FROM authenticator_key WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete auth data") + } + _, err = r.DB.ExecContext(ctx, `DELETE FROM entity_key WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete entity key data") + } + // delete entity_data + _, err = r.DB.ExecContext(ctx, `DELETE FROM entity_data WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete entity data") + } + // deleting casting data + _, err = r.DB.ExecContext(ctx, `DELETE FROM casting WHERE cast_user = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete casting data") + } + // delete notification_history data + _, err = r.DB.ExecContext(ctx, `DELETE FROM notification_history WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete notification history data") + } + // delete families data + _, err = r.DB.ExecContext(ctx, `DELETE FROM families WHERE admin_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete family data") + } + + // delete passkeys (this also clears passkey_credentials via foreign key constraint) + _, err = r.DB.ExecContext(ctx, `DELETE FROM passkeys WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete passkeys data") + } + // delete passkey_login_sessions + _, err = r.DB.ExecContext(ctx, `DELETE FROM passkey_login_sessions WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete passkey login sessions data") + } + _, err = r.DB.ExecContext(ctx, `DELETE FROM remote_store WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete remote store data") + } + + // delete srp_auth data + _, err = r.DB.ExecContext(ctx, `DELETE FROM srp_auth WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete srp auth data") + } + // delete temp_srp_setup data + _, err = r.DB.ExecContext(ctx, `DELETE FROM temp_srp_setup WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete temp srp setup data") + } + // delete two_factor data + _, err = r.DB.ExecContext(ctx, `DELETE FROM two_factor WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete two factor data") + } + // delete tokens data + _, err = r.DB.ExecContext(ctx, `DELETE FROM tokens WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete tokens data") + } + // delete webauthn_sessions data + _, err = r.DB.ExecContext(ctx, `DELETE FROM webauthn_sessions WHERE user_id = $1`, userID) + if err != nil { + return stacktrace.Propagate(err, "failed to delete web auth sessions data") + } + return nil +} diff --git a/server/pkg/repo/embedding/repository.go b/server/pkg/repo/embedding/repository.go new file mode 100644 index 000000000..e44753b24 --- /dev/null +++ b/server/pkg/repo/embedding/repository.go @@ -0,0 +1,98 @@ +package embedding + +import ( + "context" + "database/sql" + "fmt" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/stacktrace" + "github.com/sirupsen/logrus" +) + +// Repository defines the methods for inserting, updating and retrieving +// ML embedding +type Repository struct { + DB *sql.DB +} + +// Create inserts a new embedding + +func (r *Repository) 
+ var updatedAt int64
+ err := r.DB.QueryRowContext(ctx, `INSERT INTO embeddings
+ (file_id, owner_id, model)
+ VALUES ($1, $2, $3)
+ ON CONFLICT ON CONSTRAINT unique_embeddings_file_id_model
+ DO UPDATE SET updated_at = now_utc_micro_seconds()
+ RETURNING updated_at`, entry.FileID, ownerID, entry.Model).Scan(&updatedAt)
+ if err != nil {
+ // check if error is due to model enum invalid value
+ if err.Error() == fmt.Sprintf("pq: invalid input value for enum model: \"%s\"", entry.Model) {
+ return ente.Embedding{}, stacktrace.Propagate(ente.ErrBadRequest, "invalid model value")
+ }
+ return ente.Embedding{}, stacktrace.Propagate(err, "")
+ }
+ return ente.Embedding{
+ FileID: entry.FileID,
+ Model: entry.Model,
+ EncryptedEmbedding: entry.EncryptedEmbedding,
+ DecryptionHeader: entry.DecryptionHeader,
+ UpdatedAt: updatedAt,
+ }, nil
+}
+
+// GetDiff returns the embeddings that have been updated since the given time
+func (r *Repository) GetDiff(ctx context.Context, ownerID int64, model ente.Model, sinceTime int64, limit int16) ([]ente.Embedding, error) {
+ rows, err := r.DB.QueryContext(ctx, `SELECT file_id, model, encrypted_embedding, decryption_header, updated_at
+ FROM embeddings
+ WHERE owner_id = $1 AND model = $2 AND updated_at > $3
+ ORDER BY updated_at ASC
+ LIMIT $4`, ownerID, model, sinceTime, limit)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ return convertRowsToEmbeddings(rows)
+}
+
+func (r *Repository) DeleteAll(ctx context.Context, ownerID int64) error {
+ _, err := r.DB.ExecContext(ctx, "DELETE FROM embeddings WHERE owner_id = $1", ownerID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ return nil
+}
+
+func (r *Repository) Delete(fileID int64) error {
+ _, err := r.DB.Exec("DELETE FROM embeddings WHERE file_id = $1", fileID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ return nil
+}
+
+func convertRowsToEmbeddings(rows *sql.Rows) ([]ente.Embedding, error) {
+ defer func() {
+ if err := rows.Close(); err != nil {
+ logrus.Error(err)
+ }
+ }()
+
+ result := make([]ente.Embedding, 0)
+ for rows.Next() {
+ embedding := ente.Embedding{}
+ var encryptedEmbedding, decryptionHeader sql.NullString
+ // check the scan error before reading any of the scanned values
+ err := rows.Scan(&embedding.FileID, &embedding.Model, &encryptedEmbedding, &decryptionHeader, &embedding.UpdatedAt)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ if encryptedEmbedding.Valid && len(encryptedEmbedding.String) > 0 {
+ embedding.EncryptedEmbedding = encryptedEmbedding.String
+ }
+ if decryptionHeader.Valid && len(decryptionHeader.String) > 0 {
+ embedding.DecryptionHeader = decryptionHeader.String
+ }
+ result = append(result, embedding)
+ }
+ return result, nil
+}
diff --git a/server/pkg/repo/family.go b/server/pkg/repo/family.go
new file mode 100644
index 000000000..0a4cad516
--- /dev/null
+++ b/server/pkg/repo/family.go
@@ -0,0 +1,235 @@
+package repo
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/stacktrace"
+ "github.com/google/uuid"
+ "github.com/lib/pq"
+)
+
+// FamilyRepository is an implementation of the FamilyRepo
+type FamilyRepository struct {
+ DB *sql.DB
+}
+
+var (
+ ActiveFamilyMemberStatus = []ente.MemberStatus{ente.ACCEPTED, ente.SELF}
+ ActiveOrInvitedFamilyMemberStatus = []ente.MemberStatus{ente.INVITED, ente.ACCEPTED, ente.SELF}
+)
+
+// CreateFamily adds the current user as the admin member.
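+// A minimal usage sketch (assuming a wired-up repo; adminUserID is a
+// hypothetical caller-supplied value):
+//
+//	if err := familyRepo.CreateFamily(ctx, adminUserID); err != nil {
+//		// handle the error
+//	}
+//
+// Note that the users table update below expects exactly one row to change,
+// so the call is not idempotent: re-running it for an existing family admin
+// rolls the transaction back.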
+func (repo *FamilyRepository) CreateFamily(ctx context.Context, adminID int64) error {
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `INSERT INTO families(id, admin_id, member_id, status)
+ VALUES($1, $2, $3, $4) ON CONFLICT (admin_id,member_id)
+ DO UPDATE SET status = $4 WHERE families.status NOT IN ($4)`, uuid.New(), adminID, adminID, ente.SELF)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+
+ result, err := tx.ExecContext(ctx, `UPDATE users SET family_admin_id = $1 WHERE user_id = $2 and family_admin_id is null`, adminID, adminID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ affected, err := result.RowsAffected()
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if affected != 1 {
+ tx.Rollback()
+ return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
+ }
+ return stacktrace.Propagate(tx.Commit(), "failed to commit txn creating family")
+}
+
+func (repo *FamilyRepository) CloseFamily(ctx context.Context, adminID int64) error {
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ affectResult, err := tx.ExecContext(ctx, `DELETE FROM families WHERE admin_id = $1`, adminID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ affected, err := affectResult.RowsAffected()
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if affected != 1 {
+ tx.Rollback()
+ return stacktrace.Propagate(errors.New("exactly one row should be deleted"), "")
+ }
+ affectedRows, err := tx.ExecContext(ctx, `UPDATE users SET family_admin_id = null WHERE family_admin_id = $1`, adminID)
+
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ affected, err = affectedRows.RowsAffected()
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if affected != 1 {
+ tx.Rollback()
+ return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
+ }
+ return stacktrace.Propagate(tx.Commit(), "failed to commit txn closing family")
+}
+
+// AddMemberInvite inserts a family invitation entry for the given pair of admin & member and returns the active inviteToken
+// which can be used to accept the invite
+func (repo *FamilyRepository) AddMemberInvite(ctx context.Context, adminID int64, memberID int64, inviteToken string) (string, error) {
+ if adminID == memberID {
+ return "", stacktrace.Propagate(errors.New("memberID and adminID cannot be the same"), "")
+ }
+ // on conflict, we should not change the status from 'ACCEPTED' to `INVITED`.
+ // Also, the token should not be updated if the user is already in `INVITED` state.
+ _, err := repo.DB.ExecContext(ctx, `INSERT INTO families(id, admin_id, member_id, status, token)
+ VALUES($1, $2, $3, $4, $5) ON CONFLICT (admin_id,member_id)
+ DO UPDATE SET(status, token) = ($4, $5) WHERE NOT (families.status = ANY($6))`,
+ uuid.New(), adminID, memberID, ente.INVITED, inviteToken, pq.Array([]ente.MemberStatus{ente.INVITED, ente.ACCEPTED}))
+ if err != nil {
+ return "", stacktrace.Propagate(err, "")
+ }
+ // Fetch the current token with a separate query. Returning the token from the
+ // previous query made it complex for the case where there's no DB update.
+ var activeInviteToken string
+ err = repo.DB.QueryRowContext(ctx, `SELECT token from families where admin_id = $1 and member_id = $2 and status = $3`,
+ adminID, memberID, ente.INVITED).Scan(&activeInviteToken)
+ return activeInviteToken, stacktrace.Propagate(err, "")
+}
+
+// GetInvite returns information about the family invitation for the given token
+func (repo *FamilyRepository) GetInvite(token string) (ente.FamilyMember, error) {
+ row := repo.DB.QueryRow(`SELECT id, admin_id, member_id, status from families WHERE token = $1`, token)
+ return repo.convertRowToFamilyMember(row)
+}
+
+// GetMemberById returns information about a particular member in a family
+func (repo *FamilyRepository) GetMemberById(ctx context.Context, id uuid.UUID) (ente.FamilyMember, error) {
+ row := repo.DB.QueryRowContext(ctx, `SELECT id, admin_id, member_id, status from families WHERE id = $1`, id)
+ return repo.convertRowToFamilyMember(row)
+}
+
+func (repo *FamilyRepository) convertRowToFamilyMember(row *sql.Row) (ente.FamilyMember, error) {
+ var member ente.FamilyMember
+ err := row.Scan(&member.ID, &member.AdminUserID, &member.MemberUserID, &member.Status)
+ if err != nil {
+ return ente.FamilyMember{}, stacktrace.Propagate(err, "")
+ }
+ member.IsAdmin = member.MemberUserID == member.AdminUserID
+ return member, nil
+}
+
+// GetMembersWithStatus returns all the members in a family managed by the given admin
+func (repo *FamilyRepository) GetMembersWithStatus(adminID int64, statuses []ente.MemberStatus) ([]ente.FamilyMember, error) {
+ rows, err := repo.DB.Query(`SELECT id, admin_id, member_id, status from families
+ WHERE admin_id = $1 and status = ANY($2)`, adminID, pq.Array(statuses))
+
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ return convertRowsToFamilyMember(rows)
+}
+
+// AcceptInvite changes the invitation status in the family db for the given invite token
+func (repo *FamilyRepository) AcceptInvite(ctx context.Context, adminID int64, memberID int64, token string) error {
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE families SET status = $1 WHERE token = $2`, ente.ACCEPTED, token)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ result, err := tx.ExecContext(ctx, `UPDATE users SET family_admin_id = $1 WHERE user_id = $2 and family_admin_id is null`, adminID, memberID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ affected, err := result.RowsAffected()
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if affected != 1 {
+ tx.Rollback()
+ return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
+ }
+ return stacktrace.Propagate(tx.Commit(), "failed to commit txn for accepting family invite")
+}
+
+// RemoveMember removes an existing member from the family plan
+func (repo *FamilyRepository) RemoveMember(ctx context.Context, adminID int64, memberID int64, removeReason ente.MemberStatus) error {
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ result, err := tx.ExecContext(ctx, `UPDATE families set status = $1 WHERE admin_id = $2 AND member_id = $3 AND status= $4`, removeReason, adminID, memberID, ente.ACCEPTED)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ affected, _ := result.RowsAffected()
+ if affected != 1 {
+ tx.Rollback()
+ return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
be updated"), "") + } + _, err = tx.ExecContext(ctx, `UPDATE users set family_admin_id = null WHERE user_id = $1 and family_admin_id = $2`, memberID, adminID) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + return stacktrace.Propagate(tx.Commit(), "failed to commit") +} + +// RevokeInvite revokes the invitation invite +func (repo *FamilyRepository) RevokeInvite(ctx context.Context, adminID int64, memberID int64) error { + tx, err := repo.DB.BeginTx(ctx, nil) + if err != nil { + return stacktrace.Propagate(err, "") + } + _, err = tx.ExecContext(ctx, `UPDATE families set status=$1 WHERE admin_id = $2 AND member_id = $3 AND status = $4`, ente.REVOKED, adminID, memberID, ente.INVITED) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + return stacktrace.Propagate(tx.Commit(), "failed to commit") +} + +// DeclineAnyPendingInvite is used for removing any pending invite for the user when their account is deleted +func (repo *FamilyRepository) DeclineAnyPendingInvite(ctx context.Context, memberID int64) error { + _, err := repo.DB.ExecContext(ctx, `UPDATE families set status=$1 WHERE member_id = $2 AND status = $3`, ente.DECLINED, memberID, ente.INVITED) + if err != nil { + return stacktrace.Propagate(err, "") + } + return nil +} + +func convertRowsToFamilyMember(rows *sql.Rows) ([]ente.FamilyMember, error) { + defer rows.Close() + familyMembers := make([]ente.FamilyMember, 0) + for rows.Next() { + var member ente.FamilyMember + err := rows.Scan(&member.ID, &member.AdminUserID, &member.MemberUserID, &member.Status) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + member.IsAdmin = member.MemberUserID == member.AdminUserID + familyMembers = append(familyMembers, member) + } + return familyMembers, nil +} diff --git a/server/pkg/repo/file.go b/server/pkg/repo/file.go new file mode 100644 index 000000000..ffa7ea048 --- /dev/null +++ b/server/pkg/repo/file.go @@ -0,0 +1,775 @@ +package repo + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/s3config" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/lib/pq" +) + +// FileRepository is an implementation of the FileRepo that +// persists and retrieves data from disk. 
+type FileRepository struct {
+ DB *sql.DB
+ S3Config *s3config.S3Config
+ QueueRepo *QueueRepository
+ ObjectRepo *ObjectRepository
+ ObjectCleanupRepo *ObjectCleanupRepository
+ ObjectCopiesRepo *ObjectCopiesRepository
+ UsageRepo *UsageRepository
+}
+
+// Create creates an entry in the database for the given file
+func (repo *FileRepository) Create(
+ file ente.File,
+ fileSize int64,
+ thumbnailSize int64,
+ usageDiff int64,
+ collectionOwnerID int64,
+ app ente.App,
+) (ente.File, int64, error) {
+ // validate ownership before opening the transaction so that an early
+ // return does not leak an uncommitted tx
+ if file.OwnerID != collectionOwnerID {
+ return file, -1, stacktrace.Propagate(errors.New("both file and collection should belong to same owner"), "")
+ }
+ hotDC := repo.S3Config.GetHotDataCenter()
+ dcsForNewEntry := pq.StringArray{hotDC}
+
+ ctx := context.Background()
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ var fileID int64
+ err = tx.QueryRowContext(ctx, `INSERT INTO files
+ (owner_id, encrypted_metadata,
+ file_decryption_header, thumbnail_decryption_header, metadata_decryption_header,
+ magic_metadata, pub_magic_metadata, info, updation_time)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9) RETURNING file_id`,
+ file.OwnerID, file.Metadata.EncryptedData, file.File.DecryptionHeader,
+ file.Thumbnail.DecryptionHeader, file.Metadata.DecryptionHeader,
+ file.MagicMetadata, file.PubicMagicMetadata, file.Info,
+ file.UpdationTime).Scan(&fileID)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ file.ID = fileID
+ _, err = tx.ExecContext(ctx, `INSERT INTO collection_files
+ (collection_id, file_id, encrypted_key, key_decryption_nonce, is_deleted, updation_time, c_owner_id, f_owner_id)
+ VALUES($1, $2, $3, $4, $5, $6, $7, $8)`, file.CollectionID, file.ID,
+ file.EncryptedKey, file.KeyDecryptionNonce, false, file.UpdationTime, file.OwnerID, collectionOwnerID)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+ WHERE collection_id = $2`, file.UpdationTime, file.CollectionID)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `INSERT INTO object_keys(file_id, o_type, object_key, size, datacenters)
+ VALUES($1, $2, $3, $4, $5)`, fileID, ente.FILE, file.File.ObjectKey, fileSize, dcsForNewEntry)
+ if err != nil {
+ tx.Rollback()
+ if err.Error() == "pq: duplicate key value violates unique constraint \"object_keys_object_key_key\"" {
+ return file, -1, ente.ErrDuplicateFileObjectFound
+ }
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `INSERT INTO object_keys(file_id, o_type, object_key, size, datacenters)
+ VALUES($1, $2, $3, $4, $5)`, fileID, ente.THUMBNAIL, file.Thumbnail.ObjectKey, thumbnailSize, dcsForNewEntry)
+ if err != nil {
+ tx.Rollback()
+ if err.Error() == "pq: duplicate key value violates unique constraint \"object_keys_object_key_key\"" {
+ return file, -1, ente.ErrDuplicateThumbnailObjectFound
+ }
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+
+ err = repo.ObjectCleanupRepo.RemoveTempObjectKey(ctx, tx, file.File.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCleanupRepo.RemoveTempObjectKey(ctx, tx, file.Thumbnail.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ usage, err := repo.updateUsage(ctx, tx, file.OwnerID, usageDiff)
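+ // On success, usage holds the owner's updated storage_consumed (in bytes);
+ // it is returned to the caller along with the created file.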
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+
+ err = repo.markAsNeedingReplication(ctx, tx, file, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+
+ err = tx.Commit()
+ if err != nil {
+ return file, -1, stacktrace.Propagate(err, "")
+ }
+ return file, usage, stacktrace.Propagate(err, "")
+}
+
+// markAsNeedingReplication inserts new entries in object_copies, setting the
+// current hot DC as the source copy.
+//
+// The higher layer above us (file controller) would've already checked that the
+// object exists in the current hot DC (See `c.sizeOf` in file controller). This
+// would cover cases where the client fetched presigned upload URLs for say
+// hotDC1, but by the time they connected to museum, museum switched to using
+// hotDC2. So then when museum would try to fetch the file size from hotDC2, the
+// object won't be found there, and the upload would fail (which is the
+// behaviour we want, since hot DC swaps are not a frequent/expected operation,
+// we just wish to guarantee correctness if they do happen).
+func (repo *FileRepository) markAsNeedingReplication(ctx context.Context, tx *sql.Tx, file ente.File, hotDC string) error {
+ if hotDC == repo.S3Config.GetHotBackblazeDC() {
+ err := repo.ObjectCopiesRepo.CreateNewB2Object(ctx, tx, file.File.ObjectKey, true, true)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCopiesRepo.CreateNewB2Object(ctx, tx, file.Thumbnail.ObjectKey, true, false)
+ return stacktrace.Propagate(err, "")
+ } else if hotDC == repo.S3Config.GetHotWasabiDC() {
+ err := repo.ObjectCopiesRepo.CreateNewWasabiObject(ctx, tx, file.File.ObjectKey, true, true)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCopiesRepo.CreateNewWasabiObject(ctx, tx, file.Thumbnail.ObjectKey, true, false)
+ return stacktrace.Propagate(err, "")
+ } else {
+ // Bail out if we're trying to add a new entry for a file but the
+ // primary hot DC is not one of the known types.
+ err := fmt.Errorf("only B2 and Wasabi DCs can be used as the primary hot storage; instead, it was %s", hotDC)
+ return stacktrace.Propagate(err, "")
+ }
+}
+
+// See markAsNeedingReplication - this variant is for updating only thumbnails.
+func (repo *FileRepository) markThumbnailAsNeedingReplication(ctx context.Context, tx *sql.Tx, thumbnailObjectKey string, hotDC string) error {
+ if hotDC == repo.S3Config.GetHotBackblazeDC() {
+ err := repo.ObjectCopiesRepo.CreateNewB2Object(ctx, tx, thumbnailObjectKey, true, false)
+ return stacktrace.Propagate(err, "")
+ } else if hotDC == repo.S3Config.GetHotWasabiDC() {
+ err := repo.ObjectCopiesRepo.CreateNewWasabiObject(ctx, tx, thumbnailObjectKey, true, false)
+ return stacktrace.Propagate(err, "")
+ } else {
+ // Bail out if we're trying to add a new entry for a file but the
+ // primary hot DC is not one of the known types.
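+ // (Supporting a third hot DC would presumably need a corresponding
+ // ObjectCopiesRepo.CreateNew*Object variant in both helpers above.)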
+ err := fmt.Errorf("only B2 and Wasabi DCs can be used as the primary hot storage; instead, it was %s", hotDC)
+ return stacktrace.Propagate(err, "")
+ }
+}
+
+// ResetNeedsReplication resets the replication status for an existing file
+func (repo *FileRepository) ResetNeedsReplication(file ente.File, hotDC string) error {
+ if hotDC == repo.S3Config.GetHotBackblazeDC() {
+ err := repo.ObjectCopiesRepo.ResetNeedsWasabiReplication(file.File.ObjectKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCopiesRepo.ResetNeedsScalewayReplication(file.File.ObjectKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+
+ err = repo.ObjectCopiesRepo.ResetNeedsWasabiReplication(file.Thumbnail.ObjectKey)
+ return stacktrace.Propagate(err, "")
+ } else if hotDC == repo.S3Config.GetHotWasabiDC() {
+ err := repo.ObjectCopiesRepo.ResetNeedsB2Replication(file.File.ObjectKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCopiesRepo.ResetNeedsScalewayReplication(file.File.ObjectKey)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+
+ err = repo.ObjectCopiesRepo.ResetNeedsB2Replication(file.Thumbnail.ObjectKey)
+ return stacktrace.Propagate(err, "")
+ } else {
+ // Bail out if we're trying to update the replication flags but the
+ // primary hot DC is not one of the known types.
+ err := fmt.Errorf("only B2 and Wasabi DCs can be used as the primary hot storage; instead, it was %s", hotDC)
+ return stacktrace.Propagate(err, "")
+ }
+}
+
+// Update updates the entry in the database for the given file
+func (repo *FileRepository) Update(file ente.File, fileSize int64, thumbnailSize int64, usageDiff int64, oldObjects []string, isDuplicateRequest bool) error {
+ hotDC := repo.S3Config.GetHotDataCenter()
+ dcsForNewEntry := pq.StringArray{hotDC}
+
+ ctx := context.Background()
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE files SET encrypted_metadata = $1,
+ file_decryption_header = $2, thumbnail_decryption_header = $3,
+ metadata_decryption_header = $4, updation_time = $5 , info = $6 WHERE file_id = $7`,
+ file.Metadata.EncryptedData, file.File.DecryptionHeader,
+ file.Thumbnail.DecryptionHeader, file.Metadata.DecryptionHeader,
+ file.UpdationTime, file.Info, file.ID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ updatedRows, err := tx.QueryContext(ctx, `UPDATE collection_files
+ SET updation_time = $1 WHERE file_id = $2 RETURNING collection_id`, file.UpdationTime,
+ file.ID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ defer updatedRows.Close()
+ updatedCIDs := make([]int64, 0)
+ for updatedRows.Next() {
+ var cID int64
+ err := updatedRows.Scan(&cID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ updatedCIDs = append(updatedCIDs, cID)
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+ WHERE collection_id = ANY($2)`, file.UpdationTime, pq.Array(updatedCIDs))
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `DELETE FROM object_copies WHERE object_key = ANY($1)`,
+ pq.Array(oldObjects))
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE object_keys
+ SET object_key = $1, size = $2, datacenters = $3 WHERE file_id = $4 AND o_type = $5`,
+ file.File.ObjectKey, fileSize, dcsForNewEntry, file.ID, ente.FILE)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE object_keys
+ SET object_key = $1, size = $2, datacenters = $3 WHERE file_id = $4 AND o_type = $5`,
+ file.Thumbnail.ObjectKey, thumbnailSize, dcsForNewEntry, file.ID, ente.THUMBNAIL)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = repo.updateUsage(ctx, tx, file.OwnerID, usageDiff)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCleanupRepo.RemoveTempObjectKey(ctx, tx, file.File.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.ObjectCleanupRepo.RemoveTempObjectKey(ctx, tx, file.Thumbnail.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if isDuplicateRequest {
+ // Skip markAsNeedingReplication for duplicate requests, it'd fail with
+ // pq: duplicate key value violates unique constraint \"object_copies_pkey\"
+ // and render our transaction uncommittable
+ log.Infof("Skipping update of object_copies for a duplicate request to update file %d", file.ID)
+ } else {
+ err = repo.markAsNeedingReplication(ctx, tx, file, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ err = repo.QueueRepo.AddItems(ctx, tx, OutdatedObjectsQueue, oldObjects)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ err = tx.Commit()
+ return stacktrace.Propagate(err, "")
+}
+
+// UpdateMagicAttributes updates the magic attributes for the list of files and
+// updates the collection_files & collections rows that reference these files.
+func (repo *FileRepository) UpdateMagicAttributes(ctx context.Context, fileUpdates []ente.UpdateMagicMetadata, isPublicMetadata bool) error {
+ updationTime := time.Microseconds()
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ fileIDs := make([]int64, 0)
+ for _, update := range fileUpdates {
+ update.MagicMetadata.Version = update.MagicMetadata.Version + 1
+ fileIDs = append(fileIDs, update.ID)
+ if isPublicMetadata {
+ _, err = tx.ExecContext(ctx, `UPDATE files SET pub_magic_metadata = $1, updation_time = $2 WHERE file_id = $3`,
+ update.MagicMetadata, updationTime, update.ID)
+ } else {
+ _, err = tx.ExecContext(ctx, `UPDATE files SET magic_metadata = $1, updation_time = $2 WHERE file_id = $3`,
+ update.MagicMetadata, updationTime, update.ID)
+ }
+ if err != nil {
+ if rollbackErr := tx.Rollback(); rollbackErr != nil {
+ log.WithError(rollbackErr).Error("transaction rollback failed")
+ return stacktrace.Propagate(rollbackErr, "")
+ }
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ // todo: full table scan, need to add index (for discussion: add user_id and idx {user_id, file_id}).
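+ // One hypothetical shape for that index, if a user_id column were added as
+ // discussed (illustrative only, not part of this migration set):
+ //
+ //   CREATE INDEX collection_files_user_file_idx ON collection_files (user_id, file_id);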
+ updatedRows, err := tx.QueryContext(ctx, `UPDATE collection_files
+ SET updation_time = $1 WHERE file_id = ANY($2) AND is_deleted= false RETURNING collection_id`, updationTime,
+ pq.Array(fileIDs))
+ if err != nil {
+ if rollbackErr := tx.Rollback(); rollbackErr != nil {
+ log.WithError(rollbackErr).Error("transaction rollback failed")
+ return stacktrace.Propagate(rollbackErr, "")
+ }
+ return stacktrace.Propagate(err, "")
+ }
+ defer updatedRows.Close()
+ updatedCIDs := make([]int64, 0)
+ for updatedRows.Next() {
+ var cID int64
+ err := updatedRows.Scan(&cID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ updatedCIDs = append(updatedCIDs, cID)
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+ WHERE collection_id = ANY($2)`, updationTime, pq.Array(updatedCIDs))
+ if err != nil {
+ if rollbackErr := tx.Rollback(); rollbackErr != nil {
+ log.WithError(rollbackErr).Error("transaction rollback failed")
+ return stacktrace.Propagate(rollbackErr, "")
+ }
+ return stacktrace.Propagate(err, "")
+ }
+ return tx.Commit()
+}
+
+// UpdateThumbnail updates the thumbnail entry in the database for the given file
+func (repo *FileRepository) UpdateThumbnail(ctx context.Context, fileID int64, userID int64, thumbnail ente.FileAttributes, thumbnailSize int64, usageDiff int64, oldThumbnailObject *string) error {
+ hotDC := repo.S3Config.GetHotDataCenter()
+ dcsForNewEntry := pq.StringArray{hotDC}
+
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ updationTime := time.Microseconds()
+ _, err = tx.ExecContext(ctx, `UPDATE files SET
+ thumbnail_decryption_header = $1,
+ updation_time = $2 WHERE file_id = $3`,
+ thumbnail.DecryptionHeader,
+ updationTime, fileID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ updatedRows, err := tx.QueryContext(ctx, `UPDATE collection_files
+ SET updation_time = $1 WHERE file_id = $2 RETURNING collection_id`, updationTime,
+ fileID)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ defer updatedRows.Close()
+ updatedCIDs := make([]int64, 0)
+ for updatedRows.Next() {
+ var cID int64
+ err := updatedRows.Scan(&cID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ updatedCIDs = append(updatedCIDs, cID)
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+ WHERE collection_id = ANY($2)`, updationTime, pq.Array(updatedCIDs))
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if oldThumbnailObject != nil {
+ _, err = tx.ExecContext(ctx, `DELETE FROM object_copies WHERE object_key = $1`,
+ *oldThumbnailObject)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ _, err = tx.ExecContext(ctx, `UPDATE object_keys
+ SET object_key = $1, size = $2, datacenters = $3 WHERE file_id = $4 AND o_type = $5`,
+ thumbnail.ObjectKey, thumbnailSize, dcsForNewEntry, fileID, ente.THUMBNAIL)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ _, err = repo.updateUsage(ctx, tx, userID, usageDiff)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+
+ err = repo.ObjectCleanupRepo.RemoveTempObjectKey(ctx, tx, thumbnail.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ err = repo.markThumbnailAsNeedingReplication(ctx, tx, thumbnail.ObjectKey, hotDC)
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ if oldThumbnailObject != nil {
+ err = repo.QueueRepo.AddItems(ctx, tx, OutdatedObjectsQueue, []string{*oldThumbnailObject})
+ if err != nil {
+ tx.Rollback()
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ err = tx.Commit()
+ return stacktrace.Propagate(err, "")
+}
+
+// GetOwnerID returns the ownerID for a file
+func (repo *FileRepository) GetOwnerID(fileID int64) (int64, error) {
+ row := repo.DB.QueryRow(`SELECT owner_id FROM files WHERE file_id = $1`,
+ fileID)
+ var ownerID int64
+ err := row.Scan(&ownerID)
+ return ownerID, stacktrace.Propagate(err, "failed to get file owner")
+}
+
+// GetOwnerToFileCountMap will return a map of ownerId & number of files owned by that owner
+func (repo *FileRepository) GetOwnerToFileCountMap(ctx context.Context, fileIDs []int64) (map[int64]int64, error) {
+ rows, err := repo.DB.QueryContext(ctx, `SELECT owner_id, count(*) FROM files WHERE file_id = ANY($1) group by owner_id`,
+ pq.Array(fileIDs))
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ result := make(map[int64]int64, 0)
+ for rows.Next() {
+ var ownerID, count int64
+ if err = rows.Scan(&ownerID, &count); err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ result[ownerID] = count
+ }
+ return result, nil
+}
+
+// GetOwnerToFileIDsMap will return a map of ownerId to the IDs of the files owned by that owner
+func (repo *FileRepository) GetOwnerToFileIDsMap(ctx context.Context, fileIDs []int64) (map[int64][]int64, error) {
+ rows, err := repo.DB.QueryContext(ctx, `SELECT owner_id, file_id FROM files WHERE file_id = ANY($1)`,
+ pq.Array(fileIDs))
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ result := make(map[int64][]int64, 0)
+ for rows.Next() {
+ var ownerID, fileID int64
+ if err = rows.Scan(&ownerID, &fileID); err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ if ownerFileIDs, ok := result[ownerID]; ok {
+ result[ownerID] = append(ownerFileIDs, fileID)
+ } else {
+ result[ownerID] = []int64{fileID}
+ }
+ }
+ return result, nil
+}
+
+func (repo *FileRepository) VerifyFileOwner(ctx context.Context, fileIDs []int64, ownerID int64, logger *log.Entry) error {
+ countMap, err := repo.GetOwnerToFileCountMap(ctx, fileIDs)
+ if err != nil {
+ return stacktrace.Propagate(err, "failed to get owners info")
+ }
+ logger = logger.WithFields(log.Fields{
+ "owner_id": ownerID,
+ "file_ids": fileIDs,
+ "owners_map": countMap,
+ })
+ if len(countMap) == 0 {
+ logger.Error("all fileIDs are invalid")
+ return stacktrace.Propagate(ente.ErrBadRequest, "")
+ }
+ if len(countMap) > 1 {
+ logger.Error("files are owned by multiple users")
+ return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+ }
+ if filesOwned, ok := countMap[ownerID]; ok {
+ if filesOwned != int64(len(fileIDs)) {
+ logger.WithField("file_owned", filesOwned).Error("failed to find all fileIDs")
+ return stacktrace.Propagate(ente.ErrBadRequest, "")
+ }
+ return nil
+ } else {
+ logger.Error("user is not an owner of any file")
+ return stacktrace.Propagate(ente.ErrPermissionDenied, "")
+ }
+}
+
+// GetOwnerAndMagicMetadata returns the ownerID and magicMetadata for given file id
+func (repo *FileRepository) GetOwnerAndMagicMetadata(fileID int64, publicMetadata bool) (int64, *ente.MagicMetadata, error) {
+ var row *sql.Row
+ if publicMetadata {
+ row = repo.DB.QueryRow(`SELECT owner_id, pub_magic_metadata FROM files WHERE file_id = $1`,
+ fileID)
+ } else {
+ row = repo.DB.QueryRow(`SELECT owner_id, magic_metadata FROM files WHERE file_id = $1`,
+ fileID)
+ }
+ var ownerID int64
+ var magicMetadata *ente.MagicMetadata
+ err := row.Scan(&ownerID, &magicMetadata)
+ return ownerID, magicMetadata, stacktrace.Propagate(err, "")
+}
+
+// GetSize returns the size of files indicated by fileIDs that are owned by the given userID.
+func (repo *FileRepository) GetSize(userID int64, fileIDs []int64) (int64, error) {
+ row := repo.DB.QueryRow(`
+ SELECT COALESCE(SUM(size), 0) FROM object_keys WHERE o_type = 'file' AND is_deleted = false AND file_id = ANY(SELECT file_id FROM files WHERE (file_id = ANY($1) AND owner_id = $2))`,
+ pq.Array(fileIDs), userID)
+ var size int64
+ err := row.Scan(&size)
+ if err != nil {
+ return -1, stacktrace.Propagate(err, "")
+ }
+ return size, nil
+}
+
+// GetFileCountForUser returns the total number of files in the system for a given user.
+func (repo *FileRepository) GetFileCountForUser(userID int64, app ente.App) (int64, error) {
+ row := repo.DB.QueryRow(`SELECT count(distinct files.file_id)
+ FROM collection_files
+ JOIN collections c on c.owner_id = $1 and c.collection_id = collection_files.collection_id
+ JOIN files ON
+ files.owner_id = $1 AND files.file_id = collection_files.file_id
+ WHERE (c.app = $2 AND collection_files.is_deleted = false);`, userID, app)
+
+ var fileCount int64
+ err := row.Scan(&fileCount)
+ if err != nil {
+ return -1, stacktrace.Propagate(err, "")
+ }
+ return fileCount, nil
+}
+
+func (repo *FileRepository) GetFileAttributesFromObjectKey(objectKey string) (ente.File, error) {
+ s3ObjectKeys, err := repo.ObjectRepo.GetAllFileObjectsByObjectKey(objectKey)
+ if err != nil {
+ return ente.File{}, stacktrace.Propagate(err, "")
+ }
+ if len(s3ObjectKeys) != 2 {
+ return ente.File{}, stacktrace.Propagate(fmt.Errorf("unexpected file count: %d", len(s3ObjectKeys)), "")
+ }
+
+ var file ente.File
+ file.ID = s3ObjectKeys[0].FileID // all file IDs should be the same as per the query in GetAllFileObjectsByObjectKey
+ row := repo.DB.QueryRow(`SELECT owner_id, file_decryption_header, thumbnail_decryption_header, metadata_decryption_header, encrypted_metadata FROM files WHERE file_id = $1`, file.ID)
+ err = row.Scan(&file.OwnerID,
+ &file.File.DecryptionHeader, &file.Thumbnail.DecryptionHeader,
+ &file.Metadata.DecryptionHeader,
+ &file.Metadata.EncryptedData)
+ if err != nil {
+ return ente.File{}, err
+ }
+ for _, object := range s3ObjectKeys {
+ if object.Type == ente.FILE {
+ file.File.ObjectKey = object.ObjectKey
+ file.File.Size = object.FileSize
+ } else if object.Type == ente.THUMBNAIL {
+ file.Thumbnail.ObjectKey = object.ObjectKey
+ file.Thumbnail.Size = object.FileSize
+ } else {
+ err = fmt.Errorf("unexpected file type %s", object.Type)
+ return ente.File{}, stacktrace.Propagate(err, "")
+ }
+ }
+ return file, nil
+}
+
+// GetUsage gets the Storage usage of a user
+// Deprecated: GetUsage is deprecated, use UsageRepository.GetUsage
+func (repo *FileRepository) GetUsage(userID int64) (int64, error) {
+ return repo.UsageRepo.GetUsage(userID)
+}
+
+func (repo *FileRepository) DropFilesMetadata(ctx context.Context, fileIDs []int64) error {
+ // ensure that the fileIDs are not present in object_keys
+ rows, err := repo.DB.QueryContext(ctx, `SELECT distinct(file_id) FROM object_keys WHERE file_id = ANY($1)`, pq.Array(fileIDs))
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ fileIdsNotDeleted := make([]int64, 0)
+ for rows.Next() {
+ var fileID int64
+ err := rows.Scan(&fileID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ fileIdsNotDeleted = append(fileIdsNotDeleted, fileID)
+ }
+ if len(fileIdsNotDeleted) > 0 {
+ return stacktrace.Propagate(fmt.Errorf("fileIDs %v are still present in object_keys", fileIdsNotDeleted), "")
+ }
+ _, err = repo.DB.ExecContext(ctx, `
+ UPDATE files SET encrypted_metadata = '-',
+ metadata_decryption_header = '-',
+ file_decryption_header = '-',
+ thumbnail_decryption_header = '-',
+ magic_metadata = NULL,
+ pub_magic_metadata = NULL,
+ info = NULL
+ where file_id = ANY($1)`, pq.Array(fileIDs))
+ return stacktrace.Propagate(err, "")
+}
+
+// GetDuplicateFiles returns the list of files for a user that are of the same size
+func (repo *FileRepository) GetDuplicateFiles(userID int64) ([]ente.DuplicateFiles, error) {
+ rows, err := repo.DB.Query(`SELECT string_agg(o.file_id::character varying, ','), o.size FROM object_keys o JOIN files f ON f.file_id = o.file_id
+ WHERE f.owner_id = $1 AND o.o_type = 'file' AND o.is_deleted = false
+ GROUP BY size
+ HAVING count(*) > 1;`, userID)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ result := make([]ente.DuplicateFiles, 0)
+ for rows.Next() {
+ var res string
+ var size int64
+ err := rows.Scan(&res, &size)
+ if err != nil {
+ return result, stacktrace.Propagate(err, "")
+ }
+ fileIDStrs := strings.Split(res, ",")
+ fileIDs := make([]int64, 0)
+ for _, fileIDStr := range fileIDStrs {
+ fileID, err := strconv.ParseInt(fileIDStr, 10, 64)
+ if err != nil {
+ return result, stacktrace.Propagate(err, "")
+ }
+ fileIDs = append(fileIDs, fileID)
+ }
+ result = append(result, ente.DuplicateFiles{FileIDs: fileIDs, Size: size})
+ }
+ return result, nil
+}
+
+func (repo *FileRepository) GetLargeThumbnailFiles(userID int64, threshold int64) ([]int64, error) {
+ rows, err := repo.DB.Query(`
+ SELECT file_id FROM object_keys WHERE o_type = 'thumbnail' AND is_deleted = false AND size >= $2 AND file_id = ANY(SELECT file_id FROM files WHERE owner_id = $1)`,
+ userID, threshold)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ result := make([]int64, 0)
+ for rows.Next() {
+ var fileID int64
+ err := rows.Scan(&fileID)
+ if err != nil {
+ return result, stacktrace.Propagate(err, "")
+ }
+ result = append(result, fileID)
+ }
+ return result, nil
+}
+
+func (repo *FileRepository) GetTotalFileCount() (int64, error) {
+ // 9,522,438 is the magic number that accommodates the bumping up of fileIDs
+ // Doing this magic instead of count(*) since it's faster
+ row := repo.DB.QueryRow(`select (select max(file_id) from files) - (select 9522438)`)
+ var count int64
+ err := row.Scan(&count)
+ return count, stacktrace.Propagate(err, "")
+}
+
+func convertRowsToFiles(rows *sql.Rows) ([]ente.File, error) {
+ defer rows.Close()
+ files := make([]ente.File, 0)
+ for rows.Next() {
+ var (
+ file ente.File
+ updationTime float64
+ )
+ err := rows.Scan(&file.ID, &file.OwnerID, &file.CollectionID, &file.CollectionOwnerID,
+ &file.EncryptedKey, &file.KeyDecryptionNonce,
+ &file.File.DecryptionHeader, &file.Thumbnail.DecryptionHeader,
+ &file.Metadata.DecryptionHeader,
+ &file.Metadata.EncryptedData, &file.MagicMetadata, &file.PubicMagicMetadata,
+ &file.Info, &file.IsDeleted, &updationTime)
+ if err != nil {
+ return files, stacktrace.Propagate(err, "")
+ }
+ file.UpdationTime = int64(updationTime)
+ files = append(files, file)
+ }
+ return files, nil
+}
+
+// scheduleDeletion adds a list of files' object IDs to the delete queue for deletion from the datastore
+func (repo *FileRepository) scheduleDeletion(ctx context.Context, tx *sql.Tx, fileIDs []int64, userID int64) error {
+ diff := int64(0)
+
+ objectsToBeDeleted, err := repo.ObjectRepo.MarkObjectsAsDeletedForFileIDs(ctx, tx, fileIDs)
+ if err != nil {
+ return stacktrace.Propagate(err, "file object deletion failed for fileIDs: %v", fileIDs)
+ }
+ totalObjectSize := int64(0)
+ for _, object := range objectsToBeDeleted {
+ totalObjectSize += object.FileSize
+ }
+ diff = diff - (totalObjectSize)
+ _, err = repo.updateUsage(ctx, tx, userID, diff)
+ return stacktrace.Propagate(err, "")
+}
+
+// updateUsage updates the storage usage of a user and returns the updated value
+func (repo *FileRepository) updateUsage(ctx context.Context, tx *sql.Tx, userID int64, diff int64) (int64, error) {
+ row := tx.QueryRowContext(ctx, `SELECT storage_consumed FROM usage WHERE user_id = $1 FOR UPDATE`, userID)
+ var usage int64
+ err := row.Scan(&usage)
+ if err != nil {
+ if errors.Is(err, sql.ErrNoRows) {
+ usage = 0
+ } else {
+ return -1, stacktrace.Propagate(err, "")
+ }
+ }
+ newUsage := usage + diff
+ _, err = tx.ExecContext(ctx, `INSERT INTO usage (user_id, storage_consumed)
+ VALUES ($1, $2)
+ ON CONFLICT (user_id) DO UPDATE
+ SET storage_consumed = $2`,
+ userID, newUsage)
+ if err != nil {
+ return -1, stacktrace.Propagate(err, "")
+ }
+ return newUsage, nil
+}
diff --git a/server/pkg/repo/file_size.go b/server/pkg/repo/file_size.go
new file mode 100644
index 000000000..cc2d7c0b3
--- /dev/null
+++ b/server/pkg/repo/file_size.go
@@ -0,0 +1,83 @@
+package repo
+
+import (
+ "context"
+ "database/sql"
+
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/stacktrace"
+ "github.com/lib/pq"
+ log "github.com/sirupsen/logrus"
+)
+
+// GetFilesInfo returns a map of fileID to ente.FileInfo for the given userID.
+func (repo *FileRepository) GetFilesInfo(ctx context.Context, fileIDs []int64, userID int64) (map[int64]*ente.FileInfo, error) {
+ rows, err := repo.DB.QueryContext(ctx, `SELECT file_id, info from files where file_id = ANY($1) and owner_id = $2`, pq.Array(fileIDs), userID)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ result := make(map[int64]*ente.FileInfo, 0)
+ for rows.Next() {
+ var fileID int64
+ var info *ente.FileInfo
+ if err = rows.Scan(&fileID, &info); err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ result[fileID] = info
+ }
+ return result, nil
+}
+
+// UpdateSizeInfo updates the size info for a given map of fileIDs to ente.FileInfo.
+func (repo *FileRepository) UpdateSizeInfo(ctx context.Context, sizeInfo map[int64]*ente.FileInfo) error {
+ // Update the size info for each file using a batched transaction.
+ tx, err := repo.DB.BeginTx(ctx, nil)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ defer tx.Rollback()
+ for fileID, info := range sizeInfo {
+ _, err := tx.ExecContext(ctx, `UPDATE files SET info = $1 WHERE file_id = $2 and info is NULL`, info, fileID)
+ if err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ }
+ if err := tx.Commit(); err != nil {
+ return stacktrace.Propagate(err, "")
+ }
+ return nil
+}
+
+// GetFileInfoFromObjectKeys returns the file info for a given list of fileIDs.
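+// Rows for the FILE and THUMBNAIL object types are merged into a single
+// ente.FileInfo per file, so the caller gets both sizes in one lookup.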
+func (repo *FileRepository) GetFileInfoFromObjectKeys(ctx context.Context, fileIDs []int64) (map[int64]*ente.FileInfo, error) {
+ rows, err := repo.DB.QueryContext(ctx, `SELECT file_id, size, o_type FROM object_keys WHERE file_id = ANY($1)`, pq.Array(fileIDs))
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer func(rows *sql.Rows) {
+ err := rows.Close()
+ if err != nil {
+ log.Errorf("error closing rows: %v", err)
+ }
+ }(rows)
+ result := make(map[int64]*ente.FileInfo, 0)
+ for rows.Next() {
+ var fileID int64
+ var size int64
+ var oType ente.ObjectType
+ if err = rows.Scan(&fileID, &size, &oType); err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ if _, ok := result[fileID]; !ok {
+ result[fileID] = &ente.FileInfo{}
+ }
+ switch oType {
+ case ente.FILE:
+ result[fileID].FileSize = size
+ case ente.THUMBNAIL:
+ result[fileID].ThumbnailSize = size
+ }
+ }
+ return result, nil
+}
diff --git a/server/pkg/repo/kex/repository.go b/server/pkg/repo/kex/repository.go
new file mode 100644
index 000000000..c88eb18b3
--- /dev/null
+++ b/server/pkg/repo/kex/repository.go
@@ -0,0 +1,80 @@
+package kex
+
+import (
+ "context"
+ "crypto/rand"
+ "database/sql"
+ "encoding/hex"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ time_util "github.com/ente-io/museum/pkg/utils/time"
+ "github.com/ente-io/stacktrace"
+)
+
+const (
+ // KexStorageTTLInMinutes is the time to live (in minutes) for a key exchange key
+ KexStorageTTLInMinutes = 60
+)
+
+type Repository struct {
+ DB *sql.DB
+}
+
+// AddKey adds a wrapped key to KeyDB for retrieval within KexStorageTTLInMinutes
+func (r *Repository) AddKey(wrappedKey string, customIdentifier string) (identifier string, err error) {
+
+ if customIdentifier != "" {
+ identifier = customIdentifier
+ } else {
+ // generate a random identifier
+ randomData := make([]byte, 8)
+ _, err = rand.Read(randomData)
+ if err != nil {
+ return "", err
+ }
+ identifier = hex.EncodeToString(randomData)
+ }
+
+ // add to sql under "kex_store" table
+ _, err = r.DB.Exec("INSERT INTO kex_store (id, wrapped_key, added_at) VALUES ($1, $2, $3)", identifier, wrappedKey, time_util.Microseconds())
+ if err != nil {
+ return "", err
+ }
+
+ return
+}
+
+// GetKey returns the wrapped key for the given identifier and deletes it from KeyDB
+func (r *Repository) GetKey(identifier string) (wrappedKey string, err error) {
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ // get the wrapped key from sql
+ row := r.DB.QueryRowContext(ctx, "SELECT wrapped_key FROM kex_store WHERE id = $1", identifier)
+
+ err = row.Scan(&wrappedKey)
+
+ if err != nil {
+ return "", stacktrace.Propagate(err, "")
+ }
+
+ // delete the key from sql
+ go r.DB.Exec("DELETE FROM kex_store WHERE id = $1", identifier)
+
+ return
+}
+
+func (r *Repository) DeleteOldKeys() {
+ // go through keys where added_at < now - KexStorageTTL and delete them
+ breakTime := time_util.MicrosecondsBeforeMinutes(KexStorageTTLInMinutes)
+ _, err := r.DB.Exec("DELETE FROM kex_store WHERE added_at < $1", breakTime)
+ if err != nil {
+ log.Errorf("Error deleting old keys: %v", err)
+ return
+ }
+
+ log.Infof("Deleted old keys added before %v", breakTime)
+}
diff --git a/server/pkg/repo/locationtag/repository.go b/server/pkg/repo/locationtag/repository.go
new file mode 100644
index 000000000..0373ff6cd
--- /dev/null
+++ b/server/pkg/repo/locationtag/repository.go
@@ -0,0 +1,89 @@
+package locationtag
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "github.com/ente-io/museum/ente"
"github.com/ente-io/stacktrace" + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +// Repository defines the methods for inserting, updating and retrieving +// locationTag related entities from the underlying repository +type Repository struct { + DB *sql.DB +} + +// Create inserts a new &{ente.LocationTag} entry +func (r *Repository) Create(ctx context.Context, locationTag ente.LocationTag) (ente.LocationTag, error) { + err := r.DB.QueryRow(`INSERT into location_tag( + id, + user_id, + encrypted_key, + key_decryption_nonce, + attributes) VALUES ($1,$2,$3,$4,$5) RETURNING id,created_at,updated_at`, + uuid.New(), //$1 id + locationTag.OwnerID, // $2 user_id + locationTag.EncryptedKey, // $3 encrypted_key + locationTag.KeyDecryptionNonce, // $4 key_decryption_nonce + locationTag.Attributes). // %5 attributes + Scan(&locationTag.ID, &locationTag.CreatedAt, &locationTag.UpdatedAt) + if err != nil { + return ente.LocationTag{}, stacktrace.Propagate(err, "Failed to create locationTag") + } + return locationTag, nil +} + +// GetDiff returns the &{[]ente.LocationTag} which have been added or +// modified after the given sinceTime +func (r *Repository) GetDiff(ctx context.Context, ownerID int64, sinceTime int64, limit int16) ([]ente.LocationTag, error) { + rows, err := r.DB.Query(`SELECT + id, user_id, provider, encrypted_key, key_decryption_nonce, + attributes, is_deleted, created_at, updated_at + FROM location_tag + WHERE user_id = $1 + and updated_at > $2 + ORDER BY updated_at + LIMIT $3`, + ownerID, // $1 + sinceTime, // %2 + limit, // $3 + ) + if err != nil { + return nil, stacktrace.Propagate(err, "GetDiff query failed") + } + return convertRowsToLocationTags(rows) +} + +func (r *Repository) Delete(ctx context.Context, id string, ownerID int64) (bool, error) { + _, err := r.DB.ExecContext(ctx, + `UPDATE location_tag SET is_deleted=$1, attributes=$2 where id=$3 and user_id = $4`, + true, `{}`, // $1 is_deleted, $2 attr + id, ownerID) // $3 tagId, $4 ownerID + if err != nil { + return false, stacktrace.Propagate(err, fmt.Sprintf("faield to delele tag with id=%s", id)) + } + return true, nil +} + +func convertRowsToLocationTags(rows *sql.Rows) ([]ente.LocationTag, error) { + defer func() { + if err := rows.Close(); err != nil { + logrus.Error(err) + } + }() + locationTags := make([]ente.LocationTag, 0) + for rows.Next() { + tag := ente.LocationTag{} + err := rows.Scan( + &tag.ID, &tag.OwnerID, &tag.Provider, &tag.EncryptedKey, &tag.KeyDecryptionNonce, + &tag.Attributes, &tag.IsDeleted, &tag.CreatedAt, &tag.UpdatedAt) + if err != nil { + return nil, stacktrace.Propagate(err, "Failed to convert rowToLocationTag") + } + locationTags = append(locationTags, tag) + } + return locationTags, nil +} diff --git a/server/pkg/repo/notificationhistory.go b/server/pkg/repo/notificationhistory.go new file mode 100644 index 000000000..3e1baece2 --- /dev/null +++ b/server/pkg/repo/notificationhistory.go @@ -0,0 +1,31 @@ +package repo + +import ( + "database/sql" + "github.com/ente-io/stacktrace" + + "github.com/ente-io/museum/pkg/utils/time" +) + +type NotificationHistoryRepository struct { + DB *sql.DB +} + +func (repo *NotificationHistoryRepository) GetLastNotificationTime(userID int64, templateID string) (int64, error) { + var lastNotificationTime sql.NullInt64 + row := repo.DB.QueryRow(`SELECT MAX(sent_time) FROM notification_history WHERE user_id = $1 and template_id = $2`, userID, templateID) + err := row.Scan(&lastNotificationTime) + if err != nil { + return 0, stacktrace.Propagate(err, "") + 
+ }
+ if lastNotificationTime.Valid {
+ return lastNotificationTime.Int64, nil
+ }
+ return 0, nil
+}
+
+func (repo *NotificationHistoryRepository) SetLastNotificationTimeToNow(userID int64, templateID string) error {
+ _, err := repo.DB.Exec(`INSERT INTO notification_history(user_id, template_id, sent_time) VALUES($1, $2, $3)`,
+ userID, templateID, time.Microseconds())
+ return stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/object.go b/server/pkg/repo/object.go
new file mode 100644
index 000000000..f0cc5c6cf
--- /dev/null
+++ b/server/pkg/repo/object.go
@@ -0,0 +1,204 @@
+package repo
+
+import (
+ "context"
+ "database/sql"
+ "errors"
+ "math/rand"
+ "strconv"
+
+ "github.com/ente-io/museum/ente"
+ "github.com/ente-io/stacktrace"
+ "github.com/lib/pq"
+)
+
+type ObjectRepository struct {
+ DB *sql.DB
+ QueueRepo *QueueRepository
+}
+
+func (repo *ObjectRepository) GetObjectsMissingInDC(dc string, limit int, random bool) ([]ente.S3ObjectKey, error) {
+ rows, err := repo.DB.Query(`SELECT file_id, o_type, object_key, size FROM object_keys
+ WHERE is_deleted = false AND NOT($1 = ANY(datacenters)) limit $2`, dc, limit)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ files, err := convertRowsToObjectKeys(rows)
+ if err != nil {
+ return files, stacktrace.Propagate(err, "")
+ }
+
+ if random && len(files) > 0 {
+ rand.Shuffle(len(files), func(i, j int) { files[i], files[j] = files[j], files[i] })
+ }
+
+ return files, nil
+}
+
+func (repo *ObjectRepository) MarkObjectReplicated(objectKey string, datacenter string) (int64, error) {
+ result, err := repo.DB.Exec(`UPDATE object_keys SET datacenters = datacenters || $1::s3region WHERE object_key = $2`,
+ datacenter, objectKey)
+ if err != nil {
+ return 0, stacktrace.Propagate(err, "")
+ }
+ return result.RowsAffected()
+}
+
+// GetObject returns the ente.S3ObjectKey key for a file id and type
+func (repo *ObjectRepository) GetObject(fileID int64, objType ente.ObjectType) (ente.S3ObjectKey, error) {
+ // todo: handling of deleted objects
+ row := repo.DB.QueryRow(`SELECT object_key, size, o_type FROM object_keys WHERE file_id = $1 AND o_type = $2 AND is_deleted=false`,
+ fileID, objType)
+ var s3ObjectKey ente.S3ObjectKey
+ s3ObjectKey.FileID = fileID
+ err := row.Scan(&s3ObjectKey.ObjectKey, &s3ObjectKey.FileSize, &s3ObjectKey.Type)
+ return s3ObjectKey, stacktrace.Propagate(err, "")
+}
+
+func (repo *ObjectRepository) GetAllFileObjectsByObjectKey(objectKey string) ([]ente.S3ObjectKey, error) {
+ rows, err := repo.DB.Query(`SELECT file_id, o_type, object_key, size from object_keys where file_id in
+ (select file_id from object_keys where object_key= $1)
+ and is_deleted=false`, objectKey)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ return convertRowsToObjectKeys(rows)
+}
+
+func (repo *ObjectRepository) GetDataCentersForObject(objectKey string) ([]string, error) {
+ rows, err := repo.DB.Query(`select jsonb_array_elements_text(to_jsonb(datacenters)) from object_keys where object_key = $1`, objectKey)
+ if err != nil {
+ return nil, stacktrace.Propagate(err, "")
+ }
+ defer rows.Close()
+ datacenters := make([]string, 0)
+ for rows.Next() {
+ var dc string
+ err := rows.Scan(&dc)
+ if err != nil {
+ return datacenters, stacktrace.Propagate(err, "")
+ }
+ datacenters = append(datacenters, dc)
+ }
+ return datacenters, nil
+}
+
+func (repo *ObjectRepository) RemoveDataCenterFromObject(objectKey string, datacenter string) error {
+ _, err := repo.DB.Exec(`UPDATE object_keys SET
datacenters = array_remove(datacenters, $1) WHERE object_key = $2`,
+        datacenter, objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// RemoveObjectsForKey removes the keys of a deleted object from our tables
+func (repo *ObjectRepository) RemoveObjectsForKey(objectKey string) error {
+    _, err := repo.DB.Exec(`DELETE FROM object_keys WHERE object_key = $1 AND is_deleted = TRUE`,
+        objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// MarkObjectsAsDeletedForFileIDs marks the object keys corresponding to the given fileIDs as deleted
+// The actual deletion happens later when the queue is processed
+func (repo *ObjectRepository) MarkObjectsAsDeletedForFileIDs(ctx context.Context, tx *sql.Tx, fileIDs []int64) ([]ente.S3ObjectKey, error) {
+    rows, err := tx.QueryContext(ctx, `SELECT file_id, o_type, object_key, size FROM object_keys
+        WHERE file_id = ANY($1) AND is_deleted=false FOR UPDATE`, pq.Array(fileIDs))
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    s3ObjectKeys, err := convertRowsToObjectKeys(rows)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    var keysToBeDeleted []string
+    for _, s3ObjectKey := range s3ObjectKeys {
+        keysToBeDeleted = append(keysToBeDeleted, s3ObjectKey.ObjectKey)
+    }
+
+    err = repo.QueueRepo.AddItems(ctx, tx, RemoveComplianceHoldQueue, keysToBeDeleted)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    err = repo.QueueRepo.AddItems(ctx, tx, DeleteObjectQueue, keysToBeDeleted)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    var embeddingsToBeDeleted []string
+    for _, fileID := range fileIDs {
+        embeddingsToBeDeleted = append(embeddingsToBeDeleted, strconv.FormatInt(fileID, 10))
+    }
+
+    err = repo.QueueRepo.AddItems(ctx, tx, DeleteEmbeddingsQueue, embeddingsToBeDeleted)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    _, err = tx.ExecContext(ctx, `UPDATE object_keys SET is_deleted = TRUE WHERE file_id = ANY($1)`, pq.Array(fileIDs))
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return s3ObjectKeys, nil
+}
+
+func convertRowsToObjectKeys(rows *sql.Rows) ([]ente.S3ObjectKey, error) {
+    defer rows.Close()
+    fileObjectKeys := make([]ente.S3ObjectKey, 0)
+    for rows.Next() {
+        var fileObjectKey ente.S3ObjectKey
+        err := rows.Scan(&fileObjectKey.FileID, &fileObjectKey.Type, &fileObjectKey.ObjectKey, &fileObjectKey.FileSize)
+        if err != nil {
+            return fileObjectKeys, stacktrace.Propagate(err, "")
+        }
+        fileObjectKeys = append(fileObjectKeys, fileObjectKey)
+    }
+    return fileObjectKeys, nil
+}
+
+// DoesObjectExist returns true if there is an entry for the object key.
+func (repo *ObjectRepository) DoesObjectExist(tx *sql.Tx, objectKey string) (bool, error) {
+    var exists bool
+    err := tx.QueryRow(
+        `SELECT EXISTS (SELECT 1 FROM object_keys WHERE object_key = $1)`,
+        objectKey).Scan(&exists)
+    return exists, stacktrace.Propagate(err, "")
+}
+
+// DoesObjectOrTempObjectExist returns true if there is an entry for the object key in
+// either the object_keys or in the temp_objects table.
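+//
+// A minimal caller sketch (hypothetical wiring; the actual call sites live in
+// the controllers elsewhere in this patch):
+//
+//    exists, err := repo.DoesObjectOrTempObjectExist(objectKey)
+//    if err != nil {
+//        return false, stacktrace.Propagate(err, "")
+//    }
+//    if exists {
+//        // reject the request: the key is already taken or an upload is pending
+//    }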
+func (repo *ObjectRepository) DoesObjectOrTempObjectExist(objectKey string) (bool, error) { + var exists bool + err := repo.DB.QueryRow( + `SELECT (EXISTS (SELECT 1 FROM object_keys WHERE object_key = $1) OR + EXISTS (SELECT 1 FROM temp_objects WHERE object_key = $1))`, + objectKey).Scan(&exists) + return exists, stacktrace.Propagate(err, "") +} + +// GetObjectState returns various bits of information about an object that are +// useful in pre-flight checks during replication. +// +// Unknown objects (i.e. objectKeys for which there are no entries) are +// considered as deleted. +func (repo *ObjectRepository) GetObjectState(tx *sql.Tx, objectKey string) (ObjectState ente.ObjectState, err error) { + row := tx.QueryRow(` + SELECT ok.is_deleted, u.encrypted_email IS NULL AS is_user_deleted, ok.size + FROM object_keys ok + JOIN files f ON ok.file_id = f.file_id + JOIN users u ON f.owner_id = u.user_id + where object_key = $1 + `, objectKey) + var os ente.ObjectState + err = row.Scan(&os.IsFileDeleted, &os.IsUserDeleted, &os.Size) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + os.IsFileDeleted = true + os.IsUserDeleted = true + return os, nil + } + return os, stacktrace.Propagate(err, "Failed to fetch object state") + } + + return os, nil +} diff --git a/server/pkg/repo/object_cleanup.go b/server/pkg/repo/object_cleanup.go new file mode 100644 index 000000000..707412138 --- /dev/null +++ b/server/pkg/repo/object_cleanup.go @@ -0,0 +1,127 @@ +package repo + +import ( + "context" + "database/sql" + "errors" + + "github.com/ente-io/stacktrace" + log "github.com/sirupsen/logrus" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/time" +) + +// ObjectCleanupRepository maintains state related to objects that might need to +// be cleaned up. +// +// In particular, all presigned urls start their life as a "temp object" that is +// liable to be cleaned up if not marked as a successful upload by the client. 
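+//
+// A sketch of the intended lifecycle, using the methods defined below
+// (hypothetical orchestration; the real flow lives in the object cleanup
+// controller):
+//
+//    // when handing out a presigned upload URL
+//    err := repo.AddTempObject(ente.TempObject{ObjectKey: key}, expirationTime)
+//
+//    // when the client confirms the upload, within the transaction that
+//    // records the final object
+//    err = repo.RemoveTempObjectKey(ctx, tx, key, dc)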
+type ObjectCleanupRepository struct {
+    DB *sql.DB
+}
+
+// AddTempObject persists a given object identifier and its expirationTime
+func (repo *ObjectCleanupRepository) AddTempObject(tempObject ente.TempObject, expirationTime int64) error {
+    var err error
+    if tempObject.IsMultipart {
+        _, err = repo.DB.Exec(`INSERT INTO temp_objects(object_key, expiration_time,upload_id,is_multipart)
+            VALUES($1, $2, $3, $4)`, tempObject.ObjectKey, expirationTime, tempObject.UploadID, tempObject.IsMultipart)
+    } else {
+        _, err = repo.DB.Exec(`INSERT INTO temp_objects(object_key, expiration_time)
+            VALUES($1, $2)`, tempObject.ObjectKey, expirationTime)
+    }
+    return stacktrace.Propagate(err, "")
+}
+
+// RemoveTempObjectKey removes a TempObject identified by its key
+func (repo *ObjectCleanupRepository) RemoveTempObjectKey(ctx context.Context, tx *sql.Tx, objectKey string, dc string) error {
+    _, err := tx.ExecContext(ctx, `DELETE FROM temp_objects WHERE object_key = $1`, objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// GetAndLockExpiredObjects returns the list of temp objects that have expired,
+// keeping their rows locked until the returned transaction is closed
+func (repo *ObjectCleanupRepository) GetAndLockExpiredObjects() (*sql.Tx, []ente.TempObject, error) {
+    tx, err := repo.DB.Begin()
+    if err != nil {
+        return nil, nil, stacktrace.Propagate(err, "")
+    }
+
+    rollback := func() {
+        rerr := tx.Rollback()
+        if rerr != nil {
+            log.Errorf("Ignoring error when rolling back transaction: %s", rerr)
+        }
+    }
+
+    commit := func() {
+        cerr := tx.Commit()
+        if cerr != nil {
+            log.Errorf("Ignoring error when committing transaction: %s", cerr)
+        }
+    }
+
+    rows, err := tx.Query(`
+    SELECT object_key, is_multipart, upload_id FROM temp_objects
+    WHERE expiration_time <= $1
+    LIMIT 1000
+    FOR UPDATE SKIP LOCKED
+    `, time.Microseconds())
+
+    if err != nil && errors.Is(err, sql.ErrNoRows) {
+        commit()
+        return nil, nil, err
+    }
+
+    if err != nil {
+        rollback()
+        return nil, nil, stacktrace.Propagate(err, "")
+    }
+
+    defer rows.Close()
+    tempObjects := make([]ente.TempObject, 0)
+    for rows.Next() {
+        var tempObject ente.TempObject
+        var uploadID sql.NullString
+        err := rows.Scan(&tempObject.ObjectKey, &tempObject.IsMultipart, &uploadID)
+        if err != nil {
+            rollback()
+            return nil, nil, stacktrace.Propagate(err, "")
+        }
+        if tempObject.IsMultipart {
+            tempObject.UploadID = uploadID.String
+        }
+        tempObjects = append(tempObjects, tempObject)
+    }
+    return tx, tempObjects, nil
+}
+
+// SetExpiryForTempObject sets the expiration_time for TempObject
+func (repo *ObjectCleanupRepository) SetExpiryForTempObject(tx *sql.Tx, tempObject ente.TempObject, expirationTime int64) error {
+    if tempObject.IsMultipart {
+        _, err := tx.Exec(`
+        UPDATE temp_objects SET expiration_time = $1 WHERE object_key = $2 AND upload_id = $3
+        `, expirationTime, tempObject.ObjectKey, tempObject.UploadID)
+        return stacktrace.Propagate(err, "")
+    } else {
+        _, err := tx.Exec(`
+        UPDATE temp_objects SET expiration_time = $1 WHERE object_key = $2
+        `, expirationTime, tempObject.ObjectKey)
+        return stacktrace.Propagate(err, "")
+    }
+}
+
+// RemoveTempObject removes a given TempObject
+func (repo *ObjectCleanupRepository) RemoveTempObject(tx *sql.Tx, tempObject ente.TempObject) error {
+    if tempObject.IsMultipart {
+        _, err := tx.Exec(`
+        DELETE FROM temp_objects WHERE object_key = $1 AND upload_id = $2
+        `, tempObject.ObjectKey, tempObject.UploadID)
+        return stacktrace.Propagate(err, "")
+    } else {
+        _, err := tx.Exec(`
+        DELETE FROM temp_objects WHERE object_key = $1
+        `, tempObject.ObjectKey)
+        return stacktrace.Propagate(err, "")
+    }
+}
diff --git a/server/pkg/repo/object_copies.go b/server/pkg/repo/object_copies.go
new file mode 100644
index 000000000..7e1bb58f0
--- /dev/null
+++ b/server/pkg/repo/object_copies.go
@@ -0,0 +1,182 @@
+package repo
+
+import (
+    "context"
+    "database/sql"
+    "errors"
+    "fmt"
+
+    "github.com/ente-io/museum/ente"
+    "github.com/ente-io/stacktrace"
+    log "github.com/sirupsen/logrus"
+)
+
+// ObjectCopiesRepository wraps over our interaction with the database related
+// to the object_copies table.
+type ObjectCopiesRepository struct {
+    DB *sql.DB
+}
+
+// GetAndLockUnreplicatedObject gets an object which is not yet replicated to
+// all the replicas. It also starts a transaction to keep the row corresponding
+// to that object in the database locked.
+//
+// Both tx and objectCopies are guaranteed to be nil if error is not nil.
+//
+// If the returned transaction is not `nil`, it must be either `Rollback`ed or
+// `Commit`ed.
+func (repo *ObjectCopiesRepository) GetAndLockUnreplicatedObject() (*sql.Tx, *ente.ObjectCopies, error) {
+    tx, err := repo.DB.Begin()
+    if err != nil {
+        return nil, nil, stacktrace.Propagate(err, "")
+    }
+
+    rollback := func() {
+        rerr := tx.Rollback()
+        if rerr != nil {
+            log.Errorf("Ignoring error when rolling back transaction: %s", rerr)
+        }
+    }
+
+    commit := func() {
+        cerr := tx.Commit()
+        if cerr != nil {
+            log.Errorf("Ignoring error when committing transaction: %s", cerr)
+        }
+    }
+
+    row := tx.QueryRow(`
+    SELECT object_key, want_b2, b2, want_wasabi, wasabi, want_scw, scw
+    FROM object_copies
+    WHERE (
+        (
+            (wasabi IS NULL AND want_wasabi = true) OR
+            (scw IS NULL AND want_scw = true)
+        ) AND last_attempt < (now_utc_micro_seconds() - (24::BIGINT * 60 * 60 * 1000 * 1000))
+    )
+    LIMIT 1
+    FOR UPDATE SKIP LOCKED
+    `)
+
+    var r ente.ObjectCopies
+    err = row.Scan(&r.ObjectKey, &r.WantB2, &r.B2, &r.WantWasabi, &r.Wasabi,
+        &r.WantSCW, &r.SCW)
+
+    if err != nil && errors.Is(err, sql.ErrNoRows) {
+        commit()
+        return nil, nil, err
+    }
+
+    if err != nil {
+        rollback()
+        return nil, nil, stacktrace.Propagate(err, "")
+    }
+
+    return tx, &r, nil
+}
+
+// CreateNewB2Object creates a new entry for objectKey and marks it as having
+// been replicated to B2. It then sets provided flags to mark this object as
+// requiring replication where needed.
+//
+// This operation runs within the context of a transaction that creates the
+// initial entry for the file in the database; thus, it gets passed ctx and tx
+// which it uses to scope its own DB changes.
+func (repo *ObjectCopiesRepository) CreateNewB2Object(ctx context.Context, tx *sql.Tx, objectKey string, wantWasabi bool, wantScaleway bool) error {
+    _, err := tx.ExecContext(ctx, `
+    INSERT INTO object_copies (object_key, want_b2, b2, want_wasabi, want_scw)
+    VALUES ($1, true, now_utc_micro_seconds(), $2, $3)
+    `, objectKey, wantWasabi, wantScaleway)
+    return stacktrace.Propagate(err, "")
+}
+
+// CreateNewWasabiObject creates a new entry for objectKey and marks it as
+// having been replicated to Wasabi.
+//
+// See CreateNewB2Object for details.
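+//
+// For illustration, a hedged sketch of how a file-insert transaction might
+// register an object whose primary copy was uploaded to Wasabi (hypothetical
+// caller; the real one lives in the file controller):
+//
+//    err := repo.CreateNewWasabiObject(ctx, tx, objectKey, true /* wantB2 */, false /* wantScaleway */)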
+func (repo *ObjectCopiesRepository) CreateNewWasabiObject(ctx context.Context, tx *sql.Tx, objectKey string, wantB2 bool, wantScaleway bool) error {
+    _, err := tx.ExecContext(ctx, `
+    INSERT INTO object_copies (object_key, want_wasabi, wasabi, want_b2, want_scw)
+    VALUES ($1, true, now_utc_micro_seconds(), $2, $3)
+    `, objectKey, wantB2, wantScaleway)
+    return stacktrace.Propagate(err, "")
+}
+
+// RegisterReplicationAttempt sets the last_attempt timestamp so that this row can
+// be skipped over for the next day in case the replication was not successful.
+func (repo *ObjectCopiesRepository) RegisterReplicationAttempt(tx *sql.Tx, objectKey string) error {
+    _, err := tx.Exec(`
+    UPDATE object_copies
+    SET last_attempt = now_utc_micro_seconds()
+    WHERE object_key = $1
+    `, objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// ResetNeedsB2Replication modifies the db to indicate that objectKey should be
+// re-replicated to Backblaze even if it has already been replicated there.
+func (repo *ObjectCopiesRepository) ResetNeedsB2Replication(objectKey string) error {
+    _, err := repo.DB.Exec(`UPDATE object_copies SET b2 = null WHERE object_key = $1`,
+        objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// ResetNeedsWasabiReplication modifies the db to indicate that objectKey should
+// be re-replicated to Wasabi even if it has already been replicated there.
+func (repo *ObjectCopiesRepository) ResetNeedsWasabiReplication(objectKey string) error {
+    _, err := repo.DB.Exec(`UPDATE object_copies SET wasabi = null WHERE object_key = $1`,
+        objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// ResetNeedsScalewayReplication modifies the db to indicate that objectKey
+// should be re-replicated to Scaleway even if it has already been replicated there.
+func (repo *ObjectCopiesRepository) ResetNeedsScalewayReplication(objectKey string) error {
+    _, err := repo.DB.Exec(`UPDATE object_copies SET scw = null WHERE object_key = $1`,
+        objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// UnmarkFromReplication clears the want_* flags so that this objectKey is
+// marked as not requiring further replication.
+func (repo *ObjectCopiesRepository) UnmarkFromReplication(tx *sql.Tx, objectKey string) error {
+    _, err := tx.Exec(`
+    UPDATE object_copies
+    SET want_b2 = false, want_wasabi = false, want_scw = false
+    WHERE object_key = $1
+    `, objectKey)
+    return stacktrace.Propagate(err, "")
+}
+
+// MarkObjectReplicatedWasabi sets the time when `objectKey` was replicated to
+// Wasabi to the current timestamp.
+func (repo *ObjectCopiesRepository) MarkObjectReplicatedWasabi(tx *sql.Tx, objectKey string) error {
+    return repo.markObjectReplicated(`
+    UPDATE object_copies SET wasabi = now_utc_micro_seconds()
+    WHERE object_key = $1
+    `, tx, objectKey)
+}
+
+// MarkObjectReplicatedScaleway sets the time when `objectKey` was replicated to
+// Scaleway to the current timestamp.
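+//
+// Taken together, a replication worker pass might look roughly like this
+// (hypothetical sketch; the real worker lives in the replication controller):
+//
+//    tx, copies, err := repo.GetAndLockUnreplicatedObject()
+//    if err != nil {
+//        return // sql.ErrNoRows means nothing is pending
+//    }
+//    _ = repo.RegisterReplicationAttempt(tx, copies.ObjectKey)
+//    // ... copy the object bytes over to Scaleway ...
+//    if err := repo.MarkObjectReplicatedScaleway(tx, copies.ObjectKey); err != nil {
+//        _ = tx.Rollback()
+//        return
+//    }
+//    _ = tx.Commit()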
+func (repo *ObjectCopiesRepository) MarkObjectReplicatedScaleway(tx *sql.Tx, objectKey string) error { + return repo.markObjectReplicated(` + UPDATE object_copies SET scw = now_utc_micro_seconds() + WHERE object_key = $1 + `, tx, objectKey) +} + +func (repo *ObjectCopiesRepository) markObjectReplicated(query string, tx *sql.Tx, objectKey string) error { + result, err := tx.Exec(query, objectKey) + if err != nil { + return stacktrace.Propagate(err, "") + } + c, err := result.RowsAffected() + if err != nil { + return stacktrace.Propagate(err, "") + } + if c != 1 { + return stacktrace.Propagate(fmt.Errorf("expected 1 row to be updated, but got %d", c), "") + } + return nil +} diff --git a/server/pkg/repo/passkey/credential.go b/server/pkg/repo/passkey/credential.go new file mode 100644 index 000000000..7d80070a9 --- /dev/null +++ b/server/pkg/repo/passkey/credential.go @@ -0,0 +1,76 @@ +package passkey + +import ( + "time" + + "github.com/ente-io/museum/ente" + "github.com/google/uuid" +) + +func (r *Repository) createPasskey(userID int64, friendlyName string) (newPasskey *ente.Passkey, err error) { + + newPasskey = &ente.Passkey{ + ID: uuid.New(), + UserID: userID, + FriendlyName: friendlyName, + CreatedAt: time.Now().UnixMicro(), + } + + _, err = r.DB.Exec(` + INSERT INTO passkeys ( + id, + user_id, + friendly_name, + created_at + ) VALUES ( + $1, + $2, + $3, + $4 + ) + `, + newPasskey.ID, + newPasskey.UserID, + newPasskey.FriendlyName, + newPasskey.CreatedAt, + ) + + return +} + +func (r *Repository) createPasskeyCredential(credential *ente.PasskeyCredential) (err error) { + _, err = r.DB.Exec(` + INSERT INTO passkey_credentials( + passkey_id, + public_key, + attestation_type, + authenticator_transports, + credential_flags, + authenticator, + created_at, + credential_id + ) VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8 + )`, + credential.PasskeyID, + credential.PublicKey, + credential.AttestationType, + credential.AuthenticatorTransports, + credential.CredentialFlags, + credential.Authenticator, + credential.CreatedAt, + credential.CredentialID, + ) + if err != nil { + return + } + + return +} diff --git a/server/pkg/repo/passkey/passkey.go b/server/pkg/repo/passkey/passkey.go new file mode 100644 index 000000000..1627ab77d --- /dev/null +++ b/server/pkg/repo/passkey/passkey.go @@ -0,0 +1,483 @@ +package passkey + +import ( + "database/sql" + "encoding/base64" + "encoding/json" + "net/http" + "strings" + "time" + + ente_time "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/go-webauthn/webauthn/protocol" + "github.com/google/uuid" + "github.com/spf13/viper" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/byteMarshaller" + "github.com/go-webauthn/webauthn/webauthn" +) + +type Repository struct { + DB *sql.DB + webAuthnInstance *webauthn.WebAuthn +} + +type PasskeyUser struct { + *ente.User + repo *Repository +} + +func (u *PasskeyUser) WebAuthnID() []byte { + b, _ := byteMarshaller.ConvertInt64ToByte(u.ID) + return b +} + +func (u *PasskeyUser) WebAuthnName() string { + return u.Email +} + +func (u *PasskeyUser) WebAuthnDisplayName() string { + return u.Name +} + +func (u *PasskeyUser) WebAuthnCredentials() []webauthn.Credential { + creds, err := u.repo.GetUserPasskeyCredentials(u.ID) + if err != nil { + return []webauthn.Credential{} + } + + return creds +} + +func (u *PasskeyUser) WebAuthnIcon() string { + // this specification is deprecated but the interface requires it + return "" +} + +func NewRepository( + 
db *sql.DB, +) (repo *Repository, err error) { + rpId := viper.GetString("webauthn.rpid") + if rpId == "" { + rpId = "accounts.ente.io" + } + rpOrigins := viper.GetStringSlice("webauthn.rporigins") + + wconfig := &webauthn.Config{ + RPDisplayName: "Ente", + RPID: rpId, + RPOrigins: rpOrigins, + Timeouts: webauthn.TimeoutsConfig{ + Login: webauthn.TimeoutConfig{ + Enforce: true, + Timeout: time.Duration(5) * time.Minute, + }, + Registration: webauthn.TimeoutConfig{ + Enforce: true, + Timeout: time.Duration(5) * time.Minute, + }, + }, + } + + webAuthnInstance, err := webauthn.New(wconfig) + if err != nil { + return + } + + repo = &Repository{ + DB: db, + webAuthnInstance: webAuthnInstance, + } + + return +} + +func (r *Repository) GetUserPasskeys(userID int64) (passkeys []ente.Passkey, err error) { + rows, err := r.DB.Query(` + SELECT id, user_id, friendly_name, created_at + FROM passkeys + WHERE user_id = $1 AND deleted_at IS NULL + `, userID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + defer rows.Close() + + for rows.Next() { + var passkey ente.Passkey + if err = rows.Scan( + &passkey.ID, + &passkey.UserID, + &passkey.FriendlyName, + &passkey.CreatedAt, + ); err != nil { + err = stacktrace.Propagate(err, "") + return + } + + passkeys = append(passkeys, passkey) + } + + return +} + +func (r *Repository) CreateBeginRegistrationData(user *ente.User) (options *protocol.CredentialCreation, session *webauthn.SessionData, id uuid.UUID, err error) { + passkeyUser := &PasskeyUser{ + User: user, + repo: r, + } + + if len(passkeyUser.WebAuthnCredentials()) >= ente.MaxPasskeys { + err = stacktrace.NewError(ente.ErrMaxPasskeysReached.Error()) + return + } + + options, session, err = r.webAuthnInstance.BeginRegistration(passkeyUser) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + // save session data + marshalledSessionData, err := r.marshalSessionDataToWebAuthnSession(session) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + id = uuid.New() + + err = r.saveSessionData(id, marshalledSessionData) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) AddPasskeyTwoFactorSession(userID int64, sessionID string, expirationTime int64) error { + _, err := r.DB.Exec(`INSERT INTO passkey_login_sessions(user_id, session_id, creation_time, expiration_time) VALUES($1, $2, $3, $4)`, + userID, sessionID, ente_time.Microseconds(), expirationTime) + return stacktrace.Propagate(err, "") +} + +func (r *Repository) GetUserIDWithPasskeyTwoFactorSession(sessionID string) (userID int64, err error) { + err = r.DB.QueryRow(`SELECT user_id FROM passkey_login_sessions WHERE session_id = $1`, sessionID).Scan(&userID) + return +} + +func (r *Repository) CreateBeginAuthenticationData(user *ente.User) (options *protocol.CredentialAssertion, session *webauthn.SessionData, id uuid.UUID, err error) { + passkeyUser := &PasskeyUser{ + User: user, + repo: r, + } + + options, session, err = r.webAuthnInstance.BeginLogin(passkeyUser) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + // save session data + marshalledSessionData, err := r.marshalSessionDataToWebAuthnSession(session) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + id = uuid.New() + + err = r.saveSessionData(id, marshalledSessionData) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) FinishRegistration(user *ente.User, friendlyName string, req 
*http.Request, sessionID uuid.UUID) (err error) { + passkeyUser := &PasskeyUser{ + User: user, + repo: r, + } + + session, err := r.getWebAuthnSessionByID(sessionID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + if session.UserID != user.ID { + err = stacktrace.NewError("session does not belong to user") + return + } + + sessionData, err := session.SessionData() + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + if time.Now().After(sessionData.Expires) { + err = stacktrace.NewError("session expired") + return + } + + credential, err := r.webAuthnInstance.FinishRegistration(passkeyUser, *sessionData, req) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + newPasskey, err := r.createPasskey(user.ID, friendlyName) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + passkeyCredential, err := r.marshalCredentialToPasskeyCredential(credential, newPasskey.ID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + err = r.createPasskeyCredential(passkeyCredential) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) FinishAuthentication(user *ente.User, req *http.Request, sessionID uuid.UUID) (err error) { + passkeyUser := &PasskeyUser{ + User: user, + repo: r, + } + + session, err := r.getWebAuthnSessionByID(sessionID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + if session.UserID != user.ID { + err = stacktrace.NewError("session does not belong to user") + return + } + + sessionData, err := session.SessionData() + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + if time.Now().After(sessionData.Expires) { + err = stacktrace.NewError("session expired") + return + } + + _, err = r.webAuthnInstance.FinishLogin(passkeyUser, *sessionData, req) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) DeletePasskey(user *ente.User, passkeyID uuid.UUID) (err error) { + _, err = r.DB.Exec(` + UPDATE passkeys + SET friendly_name = $1, + deleted_at = $2 + WHERE id = $3 AND user_id = $4 AND deleted_at IS NULL + `, passkeyID, ente_time.Microseconds(), passkeyID, user.ID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) RenamePasskey(user *ente.User, passkeyID uuid.UUID, newName string) (err error) { + _, err = r.DB.Exec(` + UPDATE passkeys + SET friendly_name = $1 + WHERE id = $2 AND user_id = $3 AND deleted_at IS NULL + `, newName, passkeyID, user.ID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + return +} + +func (r *Repository) saveSessionData(id uuid.UUID, session *ente.WebAuthnSession) (err error) { + _, err = r.DB.Exec(` + INSERT INTO webauthn_sessions ( + id, + challenge, + user_id, + allowed_credential_ids, + expires_at, + user_verification_requirement, + extensions, + created_at + ) VALUES ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + $8 + ) + `, + id, + session.Challenge, + session.UserID, + session.AllowedCredentialIDs, + session.ExpiresAt, + session.UserVerificationRequirement, + session.Extensions, + session.CreatedAt, + ) + return +} + +func (r *Repository) marshalCredentialToPasskeyCredential(cred *webauthn.Credential, passkeyID uuid.UUID) (*ente.PasskeyCredential, error) { + // Convert the PublicKey to base64 + publicKeyB64 := base64.StdEncoding.EncodeToString(cred.PublicKey) + + // Convert the Transports slice to a comma-separated string + var 
transports []string + for _, t := range cred.Transport { + transports = append(transports, string(t)) + } + authenticatorTransports := strings.Join(transports, ",") + + // Marshal the Flags to JSON + credentialFlags, err := json.Marshal(cred.Flags) + if err != nil { + return nil, err + } + + // Marshal the Authenticator to JSON and encode AAGUID to base64 + authenticatorMap := map[string]interface{}{ + "AAGUID": base64.StdEncoding.EncodeToString(cred.Authenticator.AAGUID), + "SignCount": cred.Authenticator.SignCount, + "CloneWarning": cred.Authenticator.CloneWarning, + "Attachment": cred.Authenticator.Attachment, + } + authenticatorJSON, err := json.Marshal(authenticatorMap) + if err != nil { + return nil, err + } + + // convert cred.ID into base64 + credID := base64.StdEncoding.EncodeToString(cred.ID) + + // Create the PasskeyCredential + passkeyCred := &ente.PasskeyCredential{ + CredentialID: credID, + PasskeyID: passkeyID, + PublicKey: publicKeyB64, + AttestationType: cred.AttestationType, + AuthenticatorTransports: authenticatorTransports, + CredentialFlags: string(credentialFlags), + Authenticator: string(authenticatorJSON), + CreatedAt: time.Now().UnixMicro(), + } + + return passkeyCred, nil +} + +func (r *Repository) marshalSessionDataToWebAuthnSession(session *webauthn.SessionData) (webAuthnSession *ente.WebAuthnSession, err error) { + + userID, err := byteMarshaller.ConvertBytesToInt64(session.UserID) + if err != nil { + return + } + + extensionsJson, err := json.Marshal(session.Extensions) + if err != nil { + return + } + + newWebAuthnSession := &ente.WebAuthnSession{ + Challenge: session.Challenge, + UserID: userID, + AllowedCredentialIDs: byteMarshaller.EncodeSlices(session.AllowedCredentialIDs), + ExpiresAt: session.Expires.UnixMicro(), + UserVerificationRequirement: string(session.UserVerification), + Extensions: string(extensionsJson), + CreatedAt: time.Now().UnixMicro(), + } + + return newWebAuthnSession, nil +} + +func (r *Repository) GetUserPasskeyCredentials(userID int64) (credentials []webauthn.Credential, err error) { + rows, err := r.DB.Query(` + SELECT pc.* + FROM passkey_credentials pc + JOIN passkeys p ON pc.passkey_id = p.id + WHERE p.user_id = $1 AND p.deleted_at IS NULL + `, userID) + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + defer rows.Close() + + for rows.Next() { + var pc ente.PasskeyCredential + if err = rows.Scan( + &pc.PasskeyID, + &pc.CredentialID, + &pc.PublicKey, + &pc.AttestationType, + &pc.AuthenticatorTransports, + &pc.CredentialFlags, + &pc.Authenticator, + &pc.CreatedAt, + ); err != nil { + err = stacktrace.Propagate(err, "") + return + } + + var cred *webauthn.Credential + cred, err = pc.WebAuthnCredential() + if err != nil { + err = stacktrace.Propagate(err, "") + return + } + + credentials = append(credentials, *cred) + } + + return +} + +func (repo *Repository) RemoveExpiredPasskeySessions() error { + _, err := repo.DB.Exec(`DELETE FROM webauthn_sessions WHERE expires_at <= $1`, + ente_time.Microseconds()) + if err != nil { + return stacktrace.Propagate(err, "") + } + + _, err = repo.DB.Exec(`DELETE FROM passkey_login_sessions WHERE expiration_time <= $1`, + ente_time.Microseconds()) + + return stacktrace.Propagate(err, "") +} diff --git a/server/pkg/repo/passkey/session.go b/server/pkg/repo/passkey/session.go new file mode 100644 index 000000000..cd83c8a1b --- /dev/null +++ b/server/pkg/repo/passkey/session.go @@ -0,0 +1,36 @@ +package passkey + +import ( + "github.com/ente-io/museum/ente" + "github.com/google/uuid" 
+) + +func (r *Repository) getWebAuthnSessionByID(sessionID uuid.UUID) (session *ente.WebAuthnSession, err error) { + + session = &ente.WebAuthnSession{} + + err = r.DB.QueryRow(` + SELECT + id, + challenge, + user_id, + allowed_credential_ids, + expires_at, + user_verification_requirement, + extensions, + created_at + FROM webauthn_sessions + WHERE id = $1 + `, sessionID).Scan( + &session.ID, + &session.Challenge, + &session.UserID, + &session.AllowedCredentialIDs, + &session.ExpiresAt, + &session.UserVerificationRequirement, + &session.Extensions, + &session.CreatedAt, + ) + + return +} diff --git a/server/pkg/repo/public_collection.go b/server/pkg/repo/public_collection.go new file mode 100644 index 000000000..0b1d2514f --- /dev/null +++ b/server/pkg/repo/public_collection.go @@ -0,0 +1,192 @@ +package repo + +import ( + "context" + "database/sql" + "errors" + "fmt" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/stacktrace" + "github.com/lib/pq" +) + +const BaseShareURL = "https://albums.ente.io?t=%s" + +// PublicCollectionRepository defines the methods for inserting, updating and +// retrieving entities related to public collections +type PublicCollectionRepository struct { + DB *sql.DB +} + +func (pcr *PublicCollectionRepository) Insert(ctx context.Context, + cID int64, token string, validTill int64, deviceLimit int, enableCollect bool) error { + _, err := pcr.DB.ExecContext(ctx, `INSERT INTO public_collection_tokens + (collection_id, access_token, valid_till, device_limit, enable_collect) VALUES ($1, $2, $3, $4, $5)`, + cID, token, validTill, deviceLimit, enableCollect) + if err != nil && err.Error() == "pq: duplicate key value violates unique constraint \"public_active_collection_unique_idx\"" { + return ente.ErrActiveLinkAlreadyExists + } + return stacktrace.Propagate(err, "failed to insert") +} + +func (pcr *PublicCollectionRepository) DisableSharing(ctx context.Context, cID int64) error { + _, err := pcr.DB.ExecContext(ctx, `UPDATE public_collection_tokens SET is_disabled = true where + collection_id = $1 and is_disabled = false`, cID) + return stacktrace.Propagate(err, "failed to disable sharing") +} + +// GetCollectionToActivePublicURLMap will return map of collectionID to PublicURLs which are not disabled yet. 
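+//
+// A caller sketch (hypothetical wiring; the real call sites are in the
+// collection controller):
+//
+//    urlMap, err := pcr.GetCollectionToActivePublicURLMap(ctx, []int64{cID})
+//    publicURLs := urlMap[cID] // empty slice when the collection has no active link
+//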
+// Note: The url could be expired or deviceLimit is already reached
+func (pcr *PublicCollectionRepository) GetCollectionToActivePublicURLMap(ctx context.Context, collectionIDs []int64) (map[int64][]ente.PublicURL, error) {
+    rows, err := pcr.DB.QueryContext(ctx, `SELECT collection_id, access_token, valid_till, device_limit, enable_download, enable_collect, pw_nonce, mem_limit, ops_limit FROM
+        public_collection_tokens WHERE collection_id = ANY($1) and is_disabled = FALSE`,
+        pq.Array(collectionIDs))
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    defer rows.Close()
+    result := make(map[int64][]ente.PublicURL, 0)
+    for _, cID := range collectionIDs {
+        result[cID] = make([]ente.PublicURL, 0)
+    }
+    for rows.Next() {
+        publicUrl := ente.PublicURL{}
+        var collectionID int64
+        var accessToken string
+        var nonce *string
+        var opsLimit, memLimit *int64
+        if err = rows.Scan(&collectionID, &accessToken, &publicUrl.ValidTill, &publicUrl.DeviceLimit, &publicUrl.EnableDownload, &publicUrl.EnableCollect, &nonce, &memLimit, &opsLimit); err != nil {
+            return nil, stacktrace.Propagate(err, "")
+        }
+        publicUrl.URL = fmt.Sprintf(BaseShareURL, accessToken)
+        if nonce != nil {
+            publicUrl.Nonce = nonce
+            publicUrl.MemLimit = memLimit
+            publicUrl.OpsLimit = opsLimit
+            publicUrl.PasswordEnabled = true
+        }
+        result[collectionID] = append(result[collectionID], publicUrl)
+    }
+    return result, nil
+}
+
+// GetActivePublicCollectionToken will return ente.PublicCollectionToken for the given collection ID
+// Note: The token could be expired or deviceLimit is already reached
+func (pcr *PublicCollectionRepository) GetActivePublicCollectionToken(ctx context.Context, collectionID int64) (ente.PublicCollectionToken, error) {
+    row := pcr.DB.QueryRowContext(ctx, `SELECT id, collection_id, access_token, valid_till, device_limit,
+        is_disabled, pw_hash, pw_nonce, mem_limit, ops_limit, enable_download, enable_collect FROM
+        public_collection_tokens WHERE collection_id = $1 and is_disabled = FALSE`,
+        collectionID)
+
+    ret := ente.PublicCollectionToken{}
+    err := row.Scan(&ret.ID, &ret.CollectionID, &ret.Token, &ret.ValidTill, &ret.DeviceLimit,
+        &ret.IsDisabled, &ret.PassHash, &ret.Nonce, &ret.MemLimit, &ret.OpsLimit, &ret.EnableDownload, &ret.EnableCollect)
+    if err != nil {
+        return ente.PublicCollectionToken{}, stacktrace.Propagate(err, "")
+    }
+    return ret, nil
+}
+
+// UpdatePublicCollectionToken will update the row for the corresponding public collection token
+func (pcr *PublicCollectionRepository) UpdatePublicCollectionToken(ctx context.Context, pct ente.PublicCollectionToken) error {
+    _, err := pcr.DB.ExecContext(ctx, `UPDATE public_collection_tokens SET valid_till = $1, device_limit = $2,
+        pw_hash = $3, pw_nonce = $4, mem_limit = $5, ops_limit = $6, enable_download = $7, enable_collect = $8
+        where id = $9`,
+        pct.ValidTill, pct.DeviceLimit, pct.PassHash, pct.Nonce, pct.MemLimit, pct.OpsLimit, pct.EnableDownload, pct.EnableCollect, pct.ID)
+    return stacktrace.Propagate(err, "failed to update public collection token")
+}
+
+func (pcr *PublicCollectionRepository) RecordAbuseReport(ctx context.Context, accessCtx ente.PublicAccessContext,
+    url string, reason string, details ente.AbuseReportDetails) error {
+    _, err := pcr.DB.ExecContext(ctx, `INSERT INTO public_abuse_report
+        (share_id, ip, user_agent, url, reason, details) VALUES ($1, $2, $3, $4, $5, $6)
+        ON CONFLICT ON CONSTRAINT unique_report_sid_ip_ua DO UPDATE SET (reason, details) = ($5, $6)`,
+        accessCtx.ID, accessCtx.IP, accessCtx.UserAgent, url, reason, details)
+    return stacktrace.Propagate(err, "failed to record abuse report")
+}
+
+func (pcr *PublicCollectionRepository) GetAbuseReportCount(ctx context.Context, accessCtx ente.PublicAccessContext) (int64, error) {
+    row := pcr.DB.QueryRowContext(ctx, `SELECT count(*) FROM public_abuse_report WHERE share_id = $1`, accessCtx.ID)
+    var count int64 = 0
+    err := row.Scan(&count)
+    if err != nil {
+        return -1, stacktrace.Propagate(err, "")
+    }
+    return count, nil
+}
+
+func (pcr *PublicCollectionRepository) GetUniqueAccessCount(ctx context.Context, shareId int64) (int64, error) {
+    row := pcr.DB.QueryRowContext(ctx, `SELECT count(*) FROM public_collection_access_history WHERE share_id = $1`, shareId)
+    var count int64 = 0
+    err := row.Scan(&count)
+    if err != nil {
+        return -1, stacktrace.Propagate(err, "")
+    }
+    return count, nil
+}
+
+func (pcr *PublicCollectionRepository) RecordAccessHistory(ctx context.Context, shareID int64, ip string, ua string) error {
+    _, err := pcr.DB.ExecContext(ctx, `INSERT INTO public_collection_access_history
+        (share_id, ip, user_agent) VALUES ($1, $2, $3)
+        ON CONFLICT ON CONSTRAINT unique_access_sid_ip_ua DO NOTHING;`,
+        shareID, ip, ua)
+    return stacktrace.Propagate(err, "failed to record access history")
+}
+
+// AccessedInPast returns true if the given IP and user agent combination has accessed the url in the past
+func (pcr *PublicCollectionRepository) AccessedInPast(ctx context.Context, shareID int64, ip string, ua string) (bool, error) {
+    row := pcr.DB.QueryRowContext(ctx, `select share_id from public_collection_access_history where share_id =$1 and ip = $2 and user_agent = $3`,
+        shareID, ip, ua)
+    var tempID int64
+    err := row.Scan(&tempID)
+    if errors.Is(err, sql.ErrNoRows) {
+        return false, nil
+    }
+    return true, stacktrace.Propagate(err, "failed to check access history")
+}
+
+func (pcr *PublicCollectionRepository) GetCollectionSummaryByToken(ctx context.Context, accessToken string) (ente.PublicCollectionSummary, error) {
+    row := pcr.DB.QueryRowContext(ctx,
+        `SELECT sct.id, sct.collection_id, sct.is_disabled, sct.valid_till, sct.device_limit, sct.pw_hash,
+        sct.created_at, sct.updated_at, count(ah.share_id)
+        from public_collection_tokens sct
+        LEFT JOIN public_collection_access_history ah ON sct.id = ah.share_id
+        where access_token = $1
+        group by sct.id`, accessToken)
+    var result = ente.PublicCollectionSummary{}
+    err := row.Scan(&result.ID, &result.CollectionID, &result.IsDisabled, &result.ValidTill, &result.DeviceLimit,
+        &result.PassHash, &result.CreatedAt, &result.UpdatedAt, &result.DeviceAccessCount)
+    if err != nil {
+        return ente.PublicCollectionSummary{}, stacktrace.Propagate(err, "failed to get public collection summary")
+    }
+    return result, nil
+}
+
+func (pcr *PublicCollectionRepository) GetActivePublicTokenForUser(ctx context.Context, userID int64) ([]int64, error) {
+    rows, err := pcr.DB.QueryContext(ctx, `select pt.collection_id from public_collection_tokens pt left join collections c on pt.collection_id = c.collection_id where pt.is_disabled = FALSE and c.owner_id= $1;`, userID)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    defer rows.Close()
+    result := make([]int64, 0)
+    for rows.Next() {
+        var collectionID int64
+        err = rows.Scan(&collectionID)
+        if err != nil {
+            return nil, stacktrace.Propagate(err, "")
+        }
+        result = append(result, collectionID)
+    }
+    return result, nil
+}
+
+// CleanupAccessHistory deletes rows from public_collection_access_history whose corresponding public_collection_tokens entry is
disabled and the last updated time is older than 30 days +func (pcr *PublicCollectionRepository) CleanupAccessHistory(ctx context.Context) error { + _, err := pcr.DB.ExecContext(ctx, `DELETE FROM public_collection_access_history WHERE share_id IN (SELECT id FROM public_collection_tokens WHERE is_disabled = TRUE AND updated_at < (now_utc_micro_seconds() - (24::BIGINT * 30 * 60 * 60 * 1000 * 1000)))`) + if err != nil { + return stacktrace.Propagate(err, "failed to clean up public collection access history") + } + return nil +} diff --git a/server/pkg/repo/push.go b/server/pkg/repo/push.go new file mode 100644 index 000000000..522c3bf0e --- /dev/null +++ b/server/pkg/repo/push.go @@ -0,0 +1,62 @@ +package repo + +import ( + "database/sql" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/lib/pq" +) + +type PushTokenRepository struct { + DB *sql.DB +} + +func (repo *PushTokenRepository) AddToken(userID int64, token ente.PushTokenRequest) error { + _, err := repo.DB.Exec(`INSERT INTO push_tokens(user_id, fcm_token, apns_token) VALUES($1, $2, $3) + ON CONFLICT (fcm_token) DO UPDATE + SET apns_token = $3`, + userID, token.FCMToken, token.APNSToken) + return stacktrace.Propagate(err, "") +} + +func (repo *PushTokenRepository) GetTokensToBeNotified(lastNotificationTime int64, limit int) ([]ente.PushToken, error) { + rows, err := repo.DB.Query(`SELECT user_id, fcm_token, created_at, last_notified_at FROM push_tokens WHERE last_notified_at < $1 LIMIT $2`, lastNotificationTime, limit) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + tokens := make([]ente.PushToken, 0) + for rows.Next() { + var token ente.PushToken + err = rows.Scan(&token.UserID, &token.FCMToken, &token.CreatedAt, &token.LastNotifiedAt) + if err != nil { + return tokens, stacktrace.Propagate(err, "") + } + tokens = append(tokens, token) + } + return tokens, nil +} + +func (repo *PushTokenRepository) SetLastNotificationTimeToNow(pushTokens []ente.PushToken) error { + fcmTokens := make([]string, 0) + for _, pushToken := range pushTokens { + fcmTokens = append(fcmTokens, pushToken.FCMToken) + } + _, err := repo.DB.Exec(`UPDATE push_tokens SET last_notified_at = $1 WHERE fcm_token = ANY($2)`, time.Microseconds(), pq.Array(fcmTokens)) + return stacktrace.Propagate(err, "Could not set last notification time") +} + +func (repo *PushTokenRepository) RemoveTokensOlderThan(creationTime int64) error { + _, err := repo.DB.Exec(`DELETE FROM push_tokens WHERE updated_at <= $1`, creationTime) + return stacktrace.Propagate(err, "") +} + +func (repo *PushTokenRepository) RemoveTokensForUser(userID int64) error { + // Does a seq scan but should be fine since this is relatively infrequent + // and the size of the push tokens table will be small (as it gets + // periodically pruned). 
+ _, err := repo.DB.Exec(`DELETE FROM push_tokens WHERE user_id = $1`, userID) + return stacktrace.Propagate(err, "") +} diff --git a/server/pkg/repo/queue.go b/server/pkg/repo/queue.go new file mode 100644 index 000000000..325a53c78 --- /dev/null +++ b/server/pkg/repo/queue.go @@ -0,0 +1,135 @@ +package repo + +import ( + "context" + "database/sql" + "fmt" + "strconv" + "strings" + + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" +) + +// QueueRepository defines methods to insert, delete items from queue +type QueueRepository struct { + DB *sql.DB +} + +// itemDeletionDelayInMinMap tracks the delay (in min) after which an item is ready to be processed. +// -ve entry indicates that the item should be processed immediately, without any delay. +var itemDeletionDelayInMinMap = map[string]int64{ + DropFileEncMedataQueue: -1 * 24 * 60, // -ve value to ensure attributes are immediately removed + DeleteObjectQueue: 45 * 24 * 60, // 45 days in minutes + DeleteEmbeddingsQueue: -1 * 24 * 60, // -ve value to ensure embeddings are immediately removed + TrashCollectionQueue: -1 * 24 * 60, // -ve value to ensure collections are immediately marked as trashed + TrashCollectionQueueV3: -1 * 24 * 60, // -ve value to ensure collections are immediately marked as trashed + TrashEmptyQueue: -1 * 24 * 60, // -ve value to ensure empty trash request are processed in next cron run + RemoveComplianceHoldQueue: -1 * 24 * 60, // -ve value to ensure compliance hold is removed in next cron run +} + +const ( + DropFileEncMedataQueue string = "dropFileEncMetata" + DeleteObjectQueue string = "deleteObject" + DeleteEmbeddingsQueue string = "deleteEmbedding" + OutdatedObjectsQueue string = "outdatedObject" + TrashCollectionQueue string = "trashCollection" + TrashCollectionQueueV3 string = "trashCollectionV3" + TrashEmptyQueue string = "trashEmpty" + RemoveComplianceHoldQueue string = "removeComplianceHold" + BatchSize int = 30000 +) + +type QueueItem struct { + Id int64 + Item string +} + +// InsertItem adds entry in the queue with given queueName and item. 
If the entry already exists, this is a no-op
+func (repo *QueueRepository) InsertItem(ctx context.Context, queueName string, item string) error {
+    _, err := repo.DB.ExecContext(ctx, `INSERT INTO queue(queue_name, item) VALUES($1, $2)
+        ON CONFLICT (queue_name, item) DO NOTHING`, queueName, item)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    return nil
+}
+
+func (repo *QueueRepository) UpdateItem(ctx context.Context, queueName string, queueID int64, item string) error {
+    rows, err := repo.DB.ExecContext(ctx, `UPDATE queue SET item = $1 WHERE queue_name = $2 AND queue_id = $3 AND is_deleted=false`, item, queueName, queueID)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    count, err := rows.RowsAffected()
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    if count == 0 {
+        return fmt.Errorf("no item found with queueID: %d for queue %s", queueID, queueName)
+    }
+    return nil
+}
+
+// AddItems adds a list of items against a specified queue
+func (repo *QueueRepository) AddItems(ctx context.Context, tx *sql.Tx, queueName string, items []string) error {
+    if len(items) == 0 {
+        return nil
+    }
+    lb := 0
+    size := len(items)
+    for lb < size {
+        // insert in slices of at most BatchSize items per statement
+        ub := lb + BatchSize
+        if ub > size {
+            ub = size
+        }
+        slicedList := items[lb:ub]
+        query := "INSERT INTO queue(queue_name, item) VALUES "
+        var inserts []string
+        var params []interface{}
+        for i, v := range slicedList {
+            inserts = append(inserts, `($`+strconv.Itoa(2*i+1)+`,$`+strconv.Itoa(2*i+2)+`)`)
+            params = append(params, queueName, v)
+        }
+        queryVals := strings.Join(inserts, ",")
+        query = query + queryVals
+        query = query + " ON CONFLICT (queue_name, item) DO NOTHING"
+        _, err := tx.ExecContext(ctx, query, params...)
+        if err != nil {
+            return stacktrace.Propagate(err, "")
+        }
+        lb += BatchSize
+    }
+    return nil
+}
+
+func (repo *QueueRepository) DeleteItem(queueName string, item string) error {
+    _, err := repo.DB.Exec(`UPDATE queue SET is_deleted = $1 WHERE queue_name = $2 AND item=$3`, true, queueName, item)
+    return stacktrace.Propagate(err, "")
+}
+
+// GetItemsReadyForDeletion method, for a given queue name, returns a list of QueueItem which are ready for deletion
+func (repo *QueueRepository) GetItemsReadyForDeletion(queueName string, count int) ([]QueueItem, error) {
+    delayInMin, ok := itemDeletionDelayInMinMap[queueName]
+    if !ok {
+        return nil, stacktrace.Propagate(fmt.Errorf("missing delay for %s", queueName), "")
+    }
+    rows, err := repo.DB.Query(`SELECT queue_id, item FROM queue WHERE
+        queue_name=$1 and created_at <= $2 and is_deleted = false order by created_at ASC LIMIT $3`,
+        queueName, time.MicrosecondsBeforeMinutes(delayInMin), count)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+
+    defer rows.Close()
+    items := make([]QueueItem, 0)
+
+    for rows.Next() {
+        var item QueueItem
+        err = rows.Scan(&item.Id, &item.Item)
+        if err != nil {
+            return items, stacktrace.Propagate(err, "")
+        }
+        items = append(items, item)
+    }
+    return items, stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/remotestore/repository.go b/server/pkg/repo/remotestore/repository.go
new file mode 100644
index 000000000..dc54b0cfc
--- /dev/null
+++ b/server/pkg/repo/remotestore/repository.go
@@ -0,0 +1,42 @@
+package remotestore
+
+import (
+    "context"
+    "database/sql"
+
+    "github.com/ente-io/stacktrace"
+)
+
+// Repository defines the methods for inserting, updating and retrieving
+// remote store key and values
+type Repository struct {
+    DB *sql.DB
+}
+
+// InsertOrUpdate inserts the (key, value) pair for the given user, updating
+// the value if an entry for the key already exists
+func (r *Repository) InsertOrUpdate(ctx context.Context, userID int64, key string, value string) error {
+    _, err := r.DB.ExecContext(ctx, `INSERT INTO remote_store(user_id, key_name, key_value) VALUES ($1,$2,$3)
+        ON CONFLICT (user_id, key_name) DO UPDATE SET key_value = $3;
+    `,
+        userID, // $1 user_id
+        key,    // $2 key_name
+        value,  // $3 key_value
+    )
+    return stacktrace.Propagate(err, "failed to insert/update")
+}
+
+// GetValue fetches and returns the value for the given user_id and key
+func (r *Repository) GetValue(ctx context.Context, userID int64, key string) (string, error) {
+    rows := r.DB.QueryRowContext(ctx, `SELECT key_value FROM remote_store
+        WHERE user_id = $1
+        and key_name = $2`,
+        userID, // $1
+        key,    // $2
+    )
+    var keyValue string
+    err := rows.Scan(&keyValue)
+    if err != nil {
+        return keyValue, stacktrace.Propagate(err, "reading value failed")
+    }
+    return keyValue, nil
+}
diff --git a/server/pkg/repo/srp.go b/server/pkg/repo/srp.go
new file mode 100644
index 000000000..33e5097ac
--- /dev/null
+++ b/server/pkg/repo/srp.go
@@ -0,0 +1,192 @@
+package repo
+
+import (
+    "context"
+    "database/sql"
+    "errors"
+
+    "github.com/ente-io/museum/ente"
+    "github.com/ente-io/stacktrace"
+    "github.com/google/uuid"
+)
+
+// AddSRPSession inserts an SRP session and returns the session id
+func (repo *UserAuthRepository) AddSRPSession(srpUserID uuid.UUID, serverKey string, srpA string) (uuid.UUID, error) {
+    id := uuid.New()
+    _, err := repo.DB.Exec(`
+    INSERT INTO srp_sessions(id, srp_user_id, server_key, srp_a)
+    VALUES($1, $2, $3, $4)`, id, srpUserID, serverKey, srpA)
+    return id, stacktrace.Propagate(err, "")
+}
+
+func (repo *UserAuthRepository) GetSRPAuthEntity(ctx context.Context, userID int64) (*ente.SRPAuthEntity, error) {
+    result := ente.SRPAuthEntity{}
+    row := repo.DB.QueryRowContext(ctx, `SELECT user_id, srp_user_id, salt, verifier FROM srp_auth WHERE user_id = $1`, userID)
+    err := row.Scan(&result.UserID, &result.SRPUserID, &result.Salt, &result.Verifier)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return &result, nil
+}
+
+func (repo *UserAuthRepository) GetSRPAuthEntityBySRPUserID(ctx context.Context, srpUserID uuid.UUID) (*ente.SRPAuthEntity, error) {
+    result := ente.SRPAuthEntity{}
+    row := repo.DB.QueryRowContext(ctx, `SELECT user_id, srp_user_id, salt, verifier FROM srp_auth WHERE srp_user_id = $1`, srpUserID)
+    err := row.Scan(&result.UserID, &result.SRPUserID, &result.Salt, &result.Verifier)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return &result, nil
+}
+
+// IsSRPSetupDone returns true if the user has already set SRP attributes
+func (repo *UserAuthRepository) IsSRPSetupDone(ctx context.Context, userID int64) (bool, error) {
+    _, err := repo.GetSRPAuthEntity(ctx, userID)
+    if err != nil {
+        if errors.Is(err, sql.ErrNoRows) {
+            return false, nil
+        }
+        return false, stacktrace.Propagate(err, "failed to read srp attributes")
+    }
+    return true, nil
+}
+
+// UpdateEmailMFA updates the email MFA status of a user
+func (repo *UserAuthRepository) UpdateEmailMFA(ctx context.Context, userID int64, isEnabled bool) error {
+    _, err := repo.DB.ExecContext(ctx, `UPDATE users SET email_mfa = $1 WHERE user_id = $2`, isEnabled, userID)
+    if err != nil {
+        return stacktrace.Propagate(err, "failed to update email MFA status")
+    }
+    return nil
+}
+
+func (repo *UserAuthRepository) IsEmailMFAEnabled(ctx context.Context, userID int64) (*bool, error) {
+    row := repo.DB.QueryRowContext(ctx, `SELECT email_mfa FROM users WHERE user_id = $1`, userID)
+    var isEnabled bool
+    err := row.Scan(&isEnabled)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return &isEnabled, nil
+}
+
+// InsertTempSRPSetup inserts an entry into the temp_srp_setup table. It also returns the ID of the inserted row
+func (repo *UserAuthRepository) InsertTempSRPSetup(ctx context.Context, req ente.SetupSRPRequest, userID int64, sessionID *uuid.UUID) (*uuid.UUID, error) {
+    id := uuid.New()
+    _, err := repo.DB.ExecContext(ctx, `
+    INSERT INTO temp_srp_setup(id, session_id, user_id, srp_user_id, salt, verifier) VALUES($1, $2, $3, $4, $5, $6)`,
+        id, sessionID, userID, req.SrpUserID, req.SRPSalt, req.SRPVerifier)
+    return &id, stacktrace.Propagate(err, "")
+}
+
+func (repo *UserAuthRepository) GetTempSRPSetupEntity(ctx context.Context, setUpID uuid.UUID) (*ente.SRPSetupEntity, error) {
+    result := ente.SRPSetupEntity{}
+    row := repo.DB.QueryRowContext(ctx, `SELECT id, session_id, user_id, srp_user_id, salt, verifier FROM temp_srp_setup WHERE id = $1`, setUpID)
+    err := row.Scan(&result.ID, &result.SessionID, &result.UserID, &result.SRPUserID, &result.Salt, &result.Verifier)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return &result, nil
+}
+
+func (repo *UserAuthRepository) InsertSRPAuth(ctx context.Context, userID int64, srpUserID uuid.UUID, verifier string, salt string) error {
+    isSRPSetupDone, err := repo.IsSRPSetupDone(ctx, userID)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    if isSRPSetupDone {
+        return stacktrace.Propagate(ente.NewBadRequestWithMessage("SRP setup already complete"), "")
+    }
+    _, err = repo.DB.ExecContext(ctx, `
+    INSERT INTO srp_auth(user_id, srp_user_id, salt, verifier) VALUES($1, $2, $3, $4)`,
+        userID, srpUserID, salt, verifier)
+    return stacktrace.Propagate(err, "")
+}
+
+func (repo *UserAuthRepository) InsertOrUpdateSRPAuthAndKeyAttr(ctx context.Context, userID int64, req ente.UpdateSRPAndKeysRequest, setup *ente.SRPSetupEntity) error {
+    isSRPSetupDone, err := repo.IsSRPSetupDone(ctx, userID)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    tx, err := repo.DB.BeginTx(ctx, nil)
+    if err != nil {
+        return stacktrace.Propagate(err, "")
+    }
+    if !isSRPSetupDone {
+        _, err = tx.ExecContext(ctx, `
+        INSERT INTO srp_auth(user_id, srp_user_id, salt, verifier) VALUES($1, $2, $3, $4)`,
+            userID, setup.SRPUserID, setup.Salt, setup.Verifier)
+    } else {
+        _, err = tx.ExecContext(ctx, `UPDATE srp_auth SET srp_user_id = $1, salt = $2, verifier = $3 WHERE user_id = $4`,
+            setup.SRPUserID, setup.Salt, setup.Verifier, userID)
+    }
+    if err != nil {
+        rollBackErr := tx.Rollback()
+        if rollBackErr != nil {
+            return rollBackErr
+        }
+        return stacktrace.Propagate(err, "")
+    }
+    updateKeyAttr := *req.UpdateAttributes
+    _, err = tx.ExecContext(ctx, `UPDATE key_attributes SET kek_salt = $1, encrypted_key = $2, key_decryption_nonce = $3, mem_limit = $4, ops_limit = $5 WHERE user_id = $6`,
+        updateKeyAttr.KEKSalt, updateKeyAttr.EncryptedKey, updateKeyAttr.KeyDecryptionNonce, updateKeyAttr.MemLimit, updateKeyAttr.OpsLimit, userID)
+    if err != nil {
+        rollBackErr := tx.Rollback()
+        if rollBackErr != nil {
+            return rollBackErr
+        }
+        return stacktrace.Propagate(err, "")
+    }
+    return tx.Commit()
+}
+
+// GetSrpSessionEntity returns the SRP session for the given sessionID
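+//
+// Together with AddSRPSession, IncrementSrpSessionAttemptCount and
+// SetSrpSessionVerified below, a verification round trips roughly like this
+// (hypothetical wiring; the real flow lives in the user controller):
+//
+//    session, err := repo.GetSrpSessionEntity(ctx, sessionID)
+//    // ... verify the client's SRP proof against session.SRP_A and
+//    // session.ServerKey, bumping the counter on failure ...
+//    err = repo.IncrementSrpSessionAttemptCount(ctx, sessionID)
+//    err = repo.SetSrpSessionVerified(ctx, sessionID)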
+func (repo *UserAuthRepository) GetSrpSessionEntity(ctx context.Context, sessionID uuid.UUID) (*ente.SRPSessionEntity, error) {
+    result := ente.SRPSessionEntity{}
+    row := repo.DB.QueryRowContext(ctx, `SELECT id, srp_user_id, server_key, srp_a, has_verified, attempt_count FROM srp_sessions WHERE id = $1`, sessionID)
+    err := row.Scan(&result.ID, &result.SRPUserID, &result.ServerKey, &result.SRP_A, &result.IsVerified, &result.AttemptCount)
+    if err != nil {
+        return nil, stacktrace.Propagate(err, "")
+    }
+    return &result, nil
+}
+
+// IncrementSrpSessionAttemptCount increments the verification attempt count of a session
+func (repo *UserAuthRepository) IncrementSrpSessionAttemptCount(ctx context.Context, sessionID uuid.UUID) error {
+    _, err := repo.DB.ExecContext(ctx, `UPDATE srp_sessions SET attempt_count = attempt_count + 1 WHERE id = $1`, sessionID)
+    return stacktrace.Propagate(err, "")
+}
+
+// SetSrpSessionVerified marks a session as verified
+func (repo *UserAuthRepository) SetSrpSessionVerified(ctx context.Context, sessionID uuid.UUID) error {
+    _, err := repo.DB.ExecContext(ctx, `UPDATE srp_sessions SET has_verified = true WHERE id = $1`, sessionID)
+    return stacktrace.Propagate(err, "")
+}
+
+// GetSRPAttributes returns the srp attributes of a user
+func (repo *UserAuthRepository) GetSRPAttributes(userID int64) (*ente.GetSRPAttributesResponse, error) {
+    row := repo.DB.QueryRow(`SELECT srp_user_id, salt, mem_limit, ops_limit, kek_salt, email_mfa FROM srp_auth left join key_attributes on srp_auth.user_id = key_attributes.user_id
+    left join users on users.user_id = srp_auth.user_id WHERE srp_auth.user_id = $1`, userID)
+    var srpAttributes ente.GetSRPAttributesResponse
+    err := row.Scan(&srpAttributes.SRPUserID, &srpAttributes.SRPSalt, &srpAttributes.MemLimit, &srpAttributes.OpsLimit, &srpAttributes.KekSalt, &srpAttributes.IsEmailMFAEnabled)
+    if err != nil {
+        if errors.Is(err, sql.ErrNoRows) {
+            return nil, stacktrace.Propagate(&ente.ErrNotFoundError, "srp attributes are not present")
+        }
+        if err.Error() == "sql: Scan error on column index 2, name \"mem_limit\": converting NULL to int is unsupported" {
+            /* The user doesn't have key attributes; delete the srp_auth entry
+            so that the user can set up SRP afresh along with key attributes.
+            This can happen if the key attributes setup API call fails but the
+            SRP setup API call succeeds.
+            TODO: create a single API for both key attributes and srp setup
+            */
+            _, err := repo.DB.Exec(`DELETE FROM srp_auth WHERE user_id = $1`, userID)
+            if err != nil {
+                return nil, stacktrace.Propagate(err, "")
+            }
+            return nil, stacktrace.Propagate(&ente.ErrNotFoundError, "key attributes are not present")
+        }
+        return nil, stacktrace.Propagate(err, "failed to read srp attributes")
+    }
+    return &srpAttributes, stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/storagebonus/bf_addon.go b/server/pkg/repo/storagebonus/bf_addon.go
new file mode 100644
index 000000000..876d4b0b6
--- /dev/null
+++ b/server/pkg/repo/storagebonus/bf_addon.go
@@ -0,0 +1,34 @@
+package storagebonus
+
+import (
+    "context"
+    "fmt"
+    "github.com/ente-io/museum/ente/storagebonus"
+)
+
+func (r *Repository) InsertBFBonus(ctx context.Context, userID int64, validTill int64, storage int64) error {
+    bonusID := fmt.Sprintf("%s-%d", storagebonus.AddOnBf2023, userID)
+    _, err := r.DB.ExecContext(ctx, "INSERT INTO storage_bonus (bonus_id, user_id, storage, type, valid_till) VALUES ($1, $2, $3, $4, $5)", bonusID, userID, storage, storagebonus.AddOnBf2023, validTill)
+    if err != nil {
+        return err
+    }
+    return
nil +} + +func (r *Repository) RemoveBFBonus(ctx context.Context, userID int64) (int64, error) { + bonusID := fmt.Sprintf("%s-%d", storagebonus.AddOnBf2023, userID) + res, err := r.DB.ExecContext(ctx, "DELETE FROM storage_bonus WHERE bonus_id = $1", bonusID) + if err != nil { + return 0, err + } + return res.RowsAffected() +} + +func (r *Repository) UpdateBFBonus(ctx context.Context, userID int64, validTill int64, storage int64) error { + bonusID := fmt.Sprintf("%s-%d", storagebonus.AddOnBf2023, userID) + _, err := r.DB.ExecContext(ctx, "UPDATE storage_bonus SET storage = $1, valid_till = $2 WHERE bonus_id = $3", storage, validTill, bonusID) + if err != nil { + return err + } + return nil +} diff --git a/server/pkg/repo/storagebonus/bonus.go b/server/pkg/repo/storagebonus/bonus.go new file mode 100644 index 000000000..05b024917 --- /dev/null +++ b/server/pkg/repo/storagebonus/bonus.go @@ -0,0 +1,102 @@ +package storagebonus + +import ( + "context" + "github.com/lib/pq" + + "github.com/ente-io/museum/ente/storagebonus" + "github.com/ente-io/stacktrace" +) + +// GetStorageBonuses returns the storage surplus for the given userID +func (r *Repository) GetStorageBonuses(ctx context.Context, userID int64) ([]storagebonus.StorageBonus, error) { + var storageSurplus = make([]storagebonus.StorageBonus, 0) + rows, err := r.DB.QueryContext(ctx, "SELECT user_id,storage,type, created_at, updated_at, valid_till, is_revoked, revoke_reason FROM storage_bonus WHERE user_id = $1", userID) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get storage surplus for user %d", userID) + } + defer rows.Close() + for rows.Next() { + var ss storagebonus.StorageBonus + err := rows.Scan(&ss.UserID, &ss.Storage, &ss.Type, &ss.CreatedAt, &ss.UpdatedAt, &ss.ValidTill, &ss.IsRevoked, &ss.RevokeReason) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to scan storage surplus for user %d", userID) + } + storageSurplus = append(storageSurplus, ss) + } + return storageSurplus, nil +} + +func (r *Repository) GetActiveStorageBonuses(ctx context.Context, userID int64) (*storagebonus.ActiveStorageBonus, error) { + var bonuses = make([]storagebonus.StorageBonus, 0) + rows, err := r.DB.QueryContext(ctx, "SELECT user_id,storage,type, created_at, updated_at, valid_till, is_revoked, revoke_reason FROM storage_bonus WHERE user_id = $1 AND is_revoked = false AND (valid_till = 0 OR valid_till > now_utc_micro_seconds())", userID) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get active storage surplus for user %d", userID) + } + defer rows.Close() + for rows.Next() { + var ss storagebonus.StorageBonus + err := rows.Scan(&ss.UserID, &ss.Storage, &ss.Type, &ss.CreatedAt, &ss.UpdatedAt, &ss.ValidTill, &ss.IsRevoked, &ss.RevokeReason) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to scan active storage surplus for user %d", userID) + } + bonuses = append(bonuses, ss) + } + return &storagebonus.ActiveStorageBonus{StorageBonuses: bonuses}, nil +} + +// ActiveStorageSurplusOfType returns the total storage surplus for a given userID. 
Surplus is considered as active when +// it is not revoked and not expired aka validTill is 0 or greater than now_utc_micro_seconds() +func (r *Repository) ActiveStorageSurplusOfType(ctx context.Context, userID int64, bonusTypes []storagebonus.BonusType) (*int64, error) { + var total *int64 + rows, err := r.DB.QueryContext(ctx, "SELECT coalesce(sum(storage),0) FROM storage_bonus "+ + "WHERE user_id = $1 AND type = ANY($2) AND is_revoked = false AND (valid_till = 0 OR valid_till > now_utc_micro_seconds())", userID, pq.Array(bonusTypes)) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to get active storage surplus for users %d", userID) + } + defer rows.Close() + for rows.Next() { + err := rows.Scan(&total) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to scan active storage surplus for users %d", userID) + } + } + return total, nil +} + +// GetPaidAddonSurplusStorage returns the total storage surplus for a given userID. Surplus is considered as active when +// it is not revoked and not expired aka validTill is 0 or greater than now_utc_micro_seconds() +func (r *Repository) GetPaidAddonSurplusStorage(ctx context.Context, userID int64) (*int64, error) { + return r.ActiveStorageSurplusOfType(ctx, userID, storagebonus.PaidAddOnTypes) +} + +// GetAllUsersSurplusBonus returns two maps userID to referralBonus & addonBonus +func (r *Repository) GetAllUsersSurplusBonus(ctx context.Context) (refBonus map[int64]int64, addonBonus map[int64]int64, err error) { + var userID, bonus int64 + var bonusType storagebonus.BonusType + refBonus = make(map[int64]int64) + addonBonus = make(map[int64]int64) + rows, err := r.DB.QueryContext(ctx, "SELECT user_id, type, coalesce(sum(storage),0) FROM storage_bonus WHERE is_revoked = false AND (valid_till = 0 OR valid_till > now_utc_micro_seconds()) GROUP BY user_id, type") + if err != nil { + return nil, nil, stacktrace.Propagate(err, "failed to get active storage surplus for users") + } + defer rows.Close() + for rows.Next() { + err := rows.Scan(&userID, &bonusType, &bonus) + if err != nil { + return nil, nil, stacktrace.Propagate(err, "failed to scan active storage surplus for users") + } + if _, ok := refBonus[userID]; !ok { + refBonus[userID] = 0 + } + if _, ok := addonBonus[userID]; !ok { + addonBonus[userID] = 0 + } + if bonusType.RestrictToDoublingStorage() { + refBonus[userID] += bonus + } else { + addonBonus[userID] += bonus + } + } + return refBonus, addonBonus, nil +} diff --git a/server/pkg/repo/storagebonus/bonus_test.go b/server/pkg/repo/storagebonus/bonus_test.go new file mode 100644 index 000000000..87978dd8e --- /dev/null +++ b/server/pkg/repo/storagebonus/bonus_test.go @@ -0,0 +1,25 @@ +package storagebonus + +import ( + "testing" +) + +// Write test for adding entry in storage_surplus table +func TestAddStorageSurplus(t *testing.T) { + //repo := NewRepository(db) // Test + //err := repo.AddStorageBonus(context.Background(), 10, 1024, storagebonus.SignUp) + //assert.NoError(t, err) + //storageSurplus, err := repo.GetStorageBonuses(context.Background(), 10) + //assert.NoError(t, err) + // + //assert.Equal(t, 1, len(storageSurplus), "length mismatch") + //assert.Truef(t, cmp.Equal(storageSurplus[0], storagebonus.StorageBonus{ + // UserID: 10, + // Storage: 1024, + // Type: storagebonus.SignUp, + // ValidTill: 0, + // RevokeReason: nil, + // IsRevoked: false, + //}, cmpopts.IgnoreFields(storagebonus.StorageBonus{}, "CreatedAt", "UpdatedAt")), "match failed") + +} diff --git 
a/server/pkg/repo/storagebonus/referral_codes.go b/server/pkg/repo/storagebonus/referral_codes.go
new file mode 100644
index 000000000..cd4dbe755
--- /dev/null
+++ b/server/pkg/repo/storagebonus/referral_codes.go
@@ -0,0 +1,55 @@
+package storagebonus
+
+import (
+	"context"
+	"database/sql"
+
+	entity "github.com/ente-io/museum/ente/storagebonus"
+	"github.com/ente-io/stacktrace"
+)
+
+// TODO: Add context as the first parameter in all methods in this file
+
+// GetCode returns the active referral code for the given userID
+func (r *Repository) GetCode(ctx context.Context, userID int64) (*string, error) {
+	var code *string
+	err := r.DB.QueryRowContext(ctx, "SELECT code FROM referral_codes WHERE user_id = $1 and is_active = TRUE", userID).Scan(&code)
+	return code, stacktrace.Propagate(err, "failed to get referral code for user %d", userID)
+}
+
+// InsertCode inserts a referral code for the given userID
+func (r *Repository) InsertCode(ctx context.Context, userID int64, code string) error {
+	_, err := r.DB.ExecContext(ctx, "INSERT INTO referral_codes (user_id, code) VALUES ($1, $2)", userID, code)
+	if err != nil {
+		if err.Error() == "pq: duplicate key value violates unique constraint \"referral_codes_pkey\"" {
+			return stacktrace.Propagate(entity.CodeAlreadyExistsErr, "duplicate referral code for user %d", userID)
+		}
+		return stacktrace.Propagate(err, "failed to insert referral code for user %d", userID)
+	}
+	return nil
+}
+
+// AddNewCode inserts a new code and marks the old one as inactive for a given userID.
+// Note: This method is not being used in the initial MVP as we don't allow users to change their referral
+// code
+func (r *Repository) AddNewCode(ctx context.Context, userID int64, code string) error {
+	_, err := r.DB.ExecContext(ctx, "UPDATE referral_codes SET is_active = FALSE WHERE user_id = $1", userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to deactivate referral code for user %d", userID)
+	}
+	return r.InsertCode(ctx, userID, code)
+}
+
+// GetUserIDByCode returns the userID for the given referral code. The method will also return the userID
+// if the code is inactive.
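+//
+// Illustrative lookup (a sketch, not part of this change): when an invitee
+// applies a code, the caller resolves the code owner before recording the
+// referral:
+//
+//	ownerID, err := repo.GetUserIDByCode(ctx, "AABBCC")
+//	if err != nil {
+//		return err // wraps entity.InvalidCodeErr when the code is unknown
+//	}
+//	err = repo.TrackReferralAndInviteeBonus(ctx, inviteeID, *ownerID, planType)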
+func (r *Repository) GetUserIDByCode(ctx context.Context, code string) (*int64, error) {
+	var userID int64
+	err := r.DB.QueryRowContext(ctx, "SELECT user_id FROM referral_codes WHERE code = $1", code).Scan(&userID)
+	if err != nil {
+		if err == sql.ErrNoRows {
+			return nil, stacktrace.Propagate(entity.InvalidCodeErr, "code %s not found", code)
+		}
+		return nil, err
+	}
+	return &userID, nil
+}
diff --git a/server/pkg/repo/storagebonus/referral_codes_test.go b/server/pkg/repo/storagebonus/referral_codes_test.go
new file mode 100644
index 000000000..379d93233
--- /dev/null
+++ b/server/pkg/repo/storagebonus/referral_codes_test.go
@@ -0,0 +1,75 @@
+package storagebonus
+
+// Unit test cases for the referral code repository
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"testing"
+
+	entity "github.com/ente-io/museum/ente/storagebonus"
+	_ "github.com/golang-migrate/migrate/v4/source/file"
+
+	"github.com/stretchr/testify/assert"
+)
+
+// TestGetReferralCode tests the GetCode method
+func TestGetReferralCode(t *testing.T) {
+	ctx := context.Background()
+	repo := NewRepository(db)
+	// Test for a user that doesn't have a referral code
+	userID := int64(1)
+	code, err := repo.GetCode(ctx, userID)
+	assert.Nil(t, code)
+	assert.Equal(t, sql.ErrNoRows, errors.Unwrap(err))
+
+	// Insert a referral code
+	newCode := "AABBCC"
+	err = repo.InsertCode(ctx, userID, newCode)
+	assert.Nil(t, err)
+
+	// Test for when the referral code already exists
+	err = repo.InsertCode(ctx, userID, newCode)
+	assert.Error(t, err)
+	err = errors.Unwrap(err)
+	// verify that the wrapped error is CodeAlreadyExistsErr
+	assert.Equal(t, entity.CodeAlreadyExistsErr, err)
+
+	// Test for a user that has a referral code
+	code, err = repo.GetCode(ctx, userID)
+	assert.Nil(t, err)
+	assert.Equal(t, newCode, *code)
+}
+
+// TestInsertReferralCode tests the InsertCode method
+func TestInsertReferralCode(t *testing.T) {
+	repo := NewRepository(db)
+	// Insert a referral code
+	userID := int64(2)
+	code := "AAEEDD"
+	err := repo.InsertCode(context.Background(), userID, code)
+	assert.Nil(t, err)
+
+	codeNew, err := repo.GetCode(context.Background(), userID)
+	assert.Nil(t, err)
+	assert.Equal(t, code, *codeNew)
+}
+
+// TestAddNewReferralCode tests the AddNewCode method
func TestAddNewReferralCode(t *testing.T) {
+	repo := NewRepository(db)
+	userID := int64(3)
+	code := "B22222"
+	err := repo.InsertCode(context.Background(), userID, code)
+	assert.Nil(t, err)
+
+	newCode := "C22222"
+	err = repo.AddNewCode(context.Background(), userID, newCode)
+	assert.Nil(t, err)
+
+	referralCode, err := repo.GetCode(context.Background(), userID)
+	assert.Nil(t, err)
+	assert.Equal(t, newCode, *referralCode)
+}
diff --git a/server/pkg/repo/storagebonus/referral_tracking.go b/server/pkg/repo/storagebonus/referral_tracking.go
new file mode 100644
index 000000000..314decfd3
--- /dev/null
+++ b/server/pkg/repo/storagebonus/referral_tracking.go
@@ -0,0 +1,172 @@
+package storagebonus
+
+import (
+	"context"
+	"database/sql"
+	"fmt"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/ente/storagebonus"
+	"github.com/ente-io/stacktrace"
+	"github.com/sirupsen/logrus"
+)
+
+// TrackReferralAndInviteeBonus inserts an entry in the referral_tracking table for the given invitee, invitor
+// and planType, and inserts a storage bonus for the invitee, all in a single txn
+func (r *Repository) TrackReferralAndInviteeBonus(ctx context.Context, invitee, codeOwnerId int64, planType storagebonus.PlanType) error {
+	if invitee == codeOwnerId {
+		return stacktrace.Propagate(ente.ErrBadRequest, "invitee %d and invitor %d are same", invitee, codeOwnerId)
+	}
+	tx, err := r.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to begin txn for invitee bonus tracking")
+	}
+	// Note: Rollback is deferred here because we want to roll back the txn if any of the following queries fail.
+	// If we defer the rollback after the commit, it will be a no-op.
+	defer func(tx *sql.Tx) {
+		err := tx.Rollback()
+		if err != nil && err.Error() != "sql: transaction has already been committed or rolled back" {
+			logrus.WithError(err).Error("failed to rollback txn for invitee bonus tracking")
+		}
+	}(tx)
+	_, err = tx.ExecContext(ctx, "INSERT INTO referral_tracking (invitee_id, invitor_id, plan_type) VALUES ($1, $2, $3)", invitee, codeOwnerId, planType)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to insert storagebonus tracking entry for invitee %d, invitor %d and planType %s", invitee, codeOwnerId, planType)
+	}
+	bonusType := storagebonus.SignUp
+	bonusID := fmt.Sprintf("%s-%d", bonusType, invitee)
+	bonusValue := planType.SignUpInviteeBonus()
+	// Add storage surplus for the invitee who used the referral code
+	_, err = tx.ExecContext(ctx, "INSERT INTO storage_bonus (bonus_id,type, user_id, storage) VALUES ($1, $2, $3, $4)", bonusID, bonusType, invitee, bonusValue)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to add storage surplus for user %d", invitee)
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to commit txn for invitee bonus tracking")
+	}
+	return nil
+}
+
+// TrackUpgradeAndInvitorBonus marks the invitee as having upgraded from a free plan to a paid plan
+// (by flipping invitee_on_paid_plan from false to true) and inserts a storage bonus of
+// InvitorBonusOnInviteeUpgrade for the invitor, in a single transaction.
+// It verifies that the update is indeed a transition from a free plan to a paid plan for the given invitee and invitor.
+func (r *Repository) TrackUpgradeAndInvitorBonus(ctx context.Context, invitee, invitor int64, planType storagebonus.PlanType) error {
+	if invitee == invitor {
+		return stacktrace.Propagate(ente.ErrBadRequest, "invitee %d and invitor %d are same", invitee, invitor)
+	}
+	tx, err := r.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to begin txn for storagebonus tracking")
+	}
+	defer func(tx *sql.Tx) {
+		err := tx.Rollback()
+		if err != nil {
+			logrus.WithError(err).Error("failed to rollback txn for storagebonus tracking")
+		}
+	}(tx)
+	result, err := tx.ExecContext(ctx, "UPDATE referral_tracking SET invitee_on_paid_plan = true WHERE invitee_id = $1 AND invitor_id = $2 and invitee_on_paid_plan = FALSE", invitee, invitor)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to update tracking entry for invitee %d, invitor %d", invitee, invitor)
+	}
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return err
+	}
+	if affected == 0 {
+		// err is nil at this point; return an explicit error instead of propagating nil
+		return stacktrace.NewError("no tracking entry to update for invitee %d, invitor %d", invitee, invitor)
+	}
+	// Add storage surplus for the invitor who referred the invitee
+	bonusType := storagebonus.Referral
+	bonusID := fmt.Sprintf("%s-upgrade-%d", bonusType, invitee)
+	bonusValue := planType.InvitorBonusOnInviteeUpgrade()
+
+	_, err = tx.ExecContext(ctx, "INSERT INTO storage_bonus (bonus_id, type, user_id, storage) VALUES ($1, $2, $3, $4)", bonusID, bonusType, invitor, bonusValue)
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to add storage surplus for user %d", invitor)
+	}
+
+	err = tx.Commit()
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to commit txn for storagebonus tracking")
+	}
+	return nil
+}
+
+// GetUserReferralStats returns referral stats for the given userID, grouped by planType
+func (r *Repository) GetUserReferralStats(ctx context.Context, userID int64) ([]storagebonus.UserReferralPlanStat, error) {
+	rows, err := r.DB.QueryContext(ctx, "SELECT plan_type, COUNT(*), SUM(CASE WHEN invitee_on_paid_plan THEN 1 ELSE 0 END) FROM referral_tracking WHERE invitor_id = $1 GROUP BY plan_type", userID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to get referral counts for user %d", userID)
+	}
+	defer rows.Close()
+	var counts = make([]storagebonus.UserReferralPlanStat, 0)
+	for rows.Next() {
+		var count storagebonus.UserReferralPlanStat
+		err := rows.Scan(&count.PlanType, &count.TotalCount, &count.UpgradedCount)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to scan referral count for user %d", userID)
+		}
+		counts = append(counts, count)
+	}
+	return counts, nil
+}
+
+// HasAppliedReferral checks if the given user has applied a referral code in the past
+func (r *Repository) HasAppliedReferral(ctx context.Context, invitee int64) (bool, error) {
+	var count int
+	err := r.DB.QueryRowContext(ctx, "SELECT COUNT(*) FROM referral_tracking WHERE invitee_id = $1", invitee).Scan(&count)
+	if err != nil {
+		return false, stacktrace.Propagate(err, "failed to check if invitee %d has joined", invitee)
+	}
+	return count > 0, nil
+}
+
+// GetReferredForUpgradeBonus returns the tracking entries where invitee_on_paid_plan is still false
+// but the invitee now has an active paid (non-free) subscription.
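+//
+// Illustrative consumer (a sketch, not part of this change): a periodic job
+// could credit invitors for every pending upgrade returned here:
+//
+//	trackings, err := repo.GetReferredForUpgradeBonus(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	for _, tr := range trackings {
+//		_ = repo.TrackUpgradeAndInvitorBonus(ctx, tr.Invitee, tr.Invitor, tr.PlanType)
+//	}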
+func (r *Repository) GetReferredForUpgradeBonus(ctx context.Context) ([]storagebonus.Tracking, error) {
+	rows, err := r.DB.QueryContext(ctx, "SELECT invitee_id, invitor_id, plan_type FROM referral_tracking WHERE invitee_on_paid_plan = FALSE AND invitee_id IN (SELECT user_id FROM subscriptions WHERE product_id != $1 and expiry_time > now_utc_micro_seconds())", ente.FreePlanProductID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to get list of result")
+	}
+	defer func(rows *sql.Rows) {
+		err := rows.Close()
+		if err != nil {
+			logrus.WithError(err).Error("failed to close rows")
+		}
+	}(rows)
+	var result = make([]storagebonus.Tracking, 0)
+	for rows.Next() {
+		var tracking storagebonus.Tracking
+		err := rows.Scan(&tracking.Invitee, &tracking.Invitor, &tracking.PlanType)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to scan tracking")
+		}
+		result = append(result, tracking)
+	}
+	return result, nil
+}
+
+// GetReferredForDowngradePenalty returns the tracking entries where invitee_on_paid_plan is true
+// but the invitee's subscription has since expired.
+func (r *Repository) GetReferredForDowngradePenalty(ctx context.Context) ([]storagebonus.Tracking, error) {
+	rows, err := r.DB.QueryContext(ctx, "SELECT invitee_id, invitor_id, plan_type FROM referral_tracking WHERE invitee_on_paid_plan = TRUE AND invitee_id IN (SELECT user_id FROM subscriptions WHERE expiry_time < now_utc_micro_seconds())")
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to get list of result")
+	}
+	defer func(rows *sql.Rows) {
+		err := rows.Close()
+		if err != nil {
+			logrus.WithError(err).Error("failed to close rows")
+		}
+	}(rows)
+	var result = make([]storagebonus.Tracking, 0)
+	for rows.Next() {
+		var tracking storagebonus.Tracking
+		err := rows.Scan(&tracking.Invitee, &tracking.Invitor, &tracking.PlanType)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to scan tracking")
+		}
+		result = append(result, tracking)
+	}
+	return result, nil
+}
diff --git a/server/pkg/repo/storagebonus/repo.go b/server/pkg/repo/storagebonus/repo.go
new file mode 100644
index 000000000..361cd365d
--- /dev/null
+++ b/server/pkg/repo/storagebonus/repo.go
@@ -0,0 +1,18 @@
+package storagebonus
+
+import (
+	"database/sql"
+)
+
+// Repository defines the methods for inserting, updating and retrieving
+// storage bonus and referral related entities from the underlying repository
+type Repository struct {
+	DB *sql.DB
+}
+
+// NewRepository returns a new instance of Repository
+func NewRepository(db *sql.DB) *Repository {
+	return &Repository{
+		DB: db,
+	}
+}
diff --git a/server/pkg/repo/storagebonus/repo_test.go b/server/pkg/repo/storagebonus/repo_test.go
new file mode 100644
index 000000000..2316bef1a
--- /dev/null
+++ b/server/pkg/repo/storagebonus/repo_test.go
@@ -0,0 +1,67 @@
+package storagebonus
+
+import (
+	"database/sql"
+	"os"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/golang-migrate/migrate/v4"
+	"github.com/golang-migrate/migrate/v4/database/postgres"
+	log "github.com/sirupsen/logrus"
+)
+
+var db *sql.DB
+
+func TestMain(m *testing.M) {
+	if os.Getenv("ENV") != "test" {
+		// log.Fatalf exits the process, so the os.Exit(0) that previously followed it was unreachable
+		log.Fatalf("Not running tests in non-test environment")
+	}
+	err := setupDatabase()
+	if err != nil {
+		log.Fatalf("error setting up test database: %v", err)
+	}
+	// Use Exec, not QueryRow, for statements that return no rows
+	db.Exec("DELETE FROM referral_codes")
+	db.Exec("DELETE FROM storage_bonus")
+	// Run the tests
+	exitCode := m.Run()
+	db.Exec("DELETE FROM referral_codes")
+	db.Exec("DELETE FROM storage_bonus")
+	// Close the test database
connection + err = db.Close() + if err != nil { + log.Fatalf("error closing test database connection: %v", err) + } + // Exit with the result of the tests + os.Exit(exitCode) +} + +func setupDatabase() error { + var err error + // Connect to the test database + db, err = sql.Open("postgres", "user=test_user password=test_pass host=localhost dbname=ente_test_db sslmode=disable") + if err != nil { + log.Fatalf("error connecting to test database: %v", err) + } + driver, err := postgres.WithInstance(db, &postgres.Config{}) + if err != nil { + log.Fatalf("error creating postgres driver: %v", err) + } + // Get the current working directory, find the path before "/pkg", and append "/migrations" + cwd, _ := os.Getwd() + cwd = strings.Split(cwd, "/pkg/")[0] + configFilePath := "file://" + filepath.Join(cwd, "migrations") + mig, err := migrate.NewWithDatabaseInstance( + configFilePath, "ente_test_db", driver) + if err != nil { + log.Fatalf("error creating migrations: %v", err) + } else { + //log.Println("Loaded migration scripts") + if err := mig.Up(); err != nil && err != migrate.ErrNoChange { + log.Fatalf("error running migrations: %v", err) + } + } + return err +} diff --git a/server/pkg/repo/tasklock.go b/server/pkg/repo/tasklock.go new file mode 100644 index 000000000..3aa593e13 --- /dev/null +++ b/server/pkg/repo/tasklock.go @@ -0,0 +1,87 @@ +package repo + +import ( + "database/sql" + + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/sirupsen/logrus" +) + +// TaskLockRepository defines the methods for acquire and release locks +type TaskLockRepository struct { + DB *sql.DB +} + +func (repo *TaskLockRepository) AcquireLock(name string, lockUntil int64, lockedBy string) (bool, error) { + result, err := repo.DB.Exec( + `INSERT INTO task_lock(task_name, lock_until, locked_at, locked_by) VALUES($1, $2, $3, $4) + ON CONFLICT ON CONSTRAINT task_lock_pkey DO UPDATE SET lock_until = $2, locked_at = $3, locked_by = $4 + where task_lock.lock_until < $3`, name, lockUntil, time.Microseconds(), lockedBy) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return false, stacktrace.Propagate(err, "") + } + + return rowsAffected == 1, nil +} + +// ExtendLock updates the locked_at and locked_until of an existing lock (held +// by `lockedBy`). +// +// Returns false if there is no such existing lock. +func (repo *TaskLockRepository) ExtendLock(name string, lockUntil int64, lockedBy string) (bool, error) { + result, err := repo.DB.Exec( + `UPDATE task_lock SET locked_at = $1, lock_until = $2 + WHERE task_name = $3 AND locked_by = $4`, + time.Microseconds(), lockUntil, name, lockedBy) + if err != nil { + return false, stacktrace.Propagate(err, "") + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return false, stacktrace.Propagate(err, "") + } + + return rowsAffected == 1, nil +} + +// LastLockedAt returns the time (epoch microseconds) at which the lock with +// `name` was last acquired or refreshed. +// +// If there is no such lock, it'll return sql.ErrNoRows. 
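+//
+// Illustrative check (a sketch, not part of this change; the task name and
+// cadence below are hypothetical): a health check could flag a cron task
+// whose lock has not been refreshed within its expected cadence:
+//
+//	lockedAt, err := repo.LastLockedAt("trash-cleanup")
+//	if err != nil && !errors.Is(err, sql.ErrNoRows) {
+//		return err
+//	}
+//	stale := err == nil && time.Microseconds()-lockedAt > cadenceMicroseconds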
+func (repo *TaskLockRepository) LastLockedAt(name string) (int64, error) { + row := repo.DB.QueryRow( + `SELECT locked_at FROM task_lock WHERE task_name = $1`, name) + var lockedAt int64 + err := row.Scan(&lockedAt) + if err != nil { + return 0, stacktrace.Propagate(err, "") + } + return lockedAt, nil +} + +func (repo *TaskLockRepository) ReleaseLock(name string) error { + _, err := repo.DB.Exec(`DELETE FROM task_lock WHERE task_name = $1`, name) + return stacktrace.Propagate(err, "") +} + +func (repo *TaskLockRepository) CleanupExpiredLocks() error { + result, err := repo.DB.Exec(`DELETE FROM task_lock WHERE lock_until < $1`, time.Microseconds()) + if err != nil { + return stacktrace.Propagate(err, "") + } + rowsAffected, err := result.RowsAffected() + if err != nil { + return stacktrace.Propagate(err, "") + } + if rowsAffected > 0 { + logrus.WithField("expired_locks", rowsAffected).Error("Non zero expired locks") + } + return nil +} diff --git a/server/pkg/repo/trash.go b/server/pkg/repo/trash.go new file mode 100644 index 000000000..3d1cac2bd --- /dev/null +++ b/server/pkg/repo/trash.go @@ -0,0 +1,443 @@ +package repo + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + + "github.com/ente-io/museum/ente" + "github.com/ente-io/museum/pkg/utils/time" + "github.com/ente-io/stacktrace" + "github.com/lib/pq" + "github.com/sirupsen/logrus" +) + +const ( + // TrashDurationInDays number of days after which file will be removed from trash + TrashDurationInDays = 30 + // TrashDiffLimit is the default limit for number of items server will attempt to return when clients + // ask for changes. + TrashDiffLimit = 2500 + + TrashBatchSize = 1000 + + EmptyTrashQueueItemSeparator = "::" +) + +type FileWithUpdatedAt struct { + FileID int64 + UpdatedAt int64 +} + +type TrashRepository struct { + DB *sql.DB + ObjectRepo *ObjectRepository + FileRepo *FileRepository + QueueRepo *QueueRepository +} + +func (t *TrashRepository) InsertItems(ctx context.Context, tx *sql.Tx, userID int64, items []ente.TrashItemRequest) error { + if len(items) == 0 { + return nil + } + lb := 0 + size := len(items) + deletedBy := time.NDaysFromNow(TrashDurationInDays) + for lb < size { + ub := lb + TrashBatchSize + if ub > size { + ub = size + } + slicedList := items[lb:ub] + + var inserts []string + var params []interface{} + updatedAt := time.Microseconds() + query := "INSERT INTO trash(file_id, collection_id, user_id, delete_by, updated_at) VALUES " + for i, v := range slicedList { + inserts = append(inserts, fmt.Sprintf("($%d, $%d, $%d, $%d, $%d)", i*5+1, i*5+2, i*5+3, i*5+4, i*5+5)) + params = append(params, v.FileID, v.CollectionID, userID, deletedBy, updatedAt) + } + queryVals := strings.Join(inserts, ",") + query = query + queryVals + query = query + ` ON CONFLICT (file_id) DO UPDATE SET(is_restored, delete_by, updated_at) = ` + + fmt.Sprintf("(FALSE, $%d, $%d)", len(slicedList)*5+1, len(slicedList)*5+2) + ` WHERE trash.is_deleted = FALSE` + params = append(params, deletedBy, updatedAt) + _, err := tx.ExecContext(ctx, query, params...) 
+ if err != nil { + return stacktrace.Propagate(err, "") + } + lb += TrashBatchSize + } + return nil +} + +func (t *TrashRepository) GetDiff(userID int64, sinceTime int64, limit int, app ente.App) ([]ente.Trash, error) { + rows, err := t.DB.Query(` + SELECT t.file_id, t.user_id, t.collection_id, cf.encrypted_key, cf.key_decryption_nonce, + f.file_decryption_header, f.thumbnail_decryption_header, f.metadata_decryption_header, + f.encrypted_metadata, f.magic_metadata, f.updation_time, f.info, + t.is_deleted, t.is_restored, t.created_at, t.updated_at, t.delete_by + FROM trash t + JOIN collection_files cf ON t.file_id = cf.file_id AND t.collection_id = cf.collection_id + JOIN files f ON f.file_id = t.file_id + AND t.user_id = $1 + AND f.owner_id = $1 + AND t.updated_at > $2 + JOIN collections c ON c.collection_id = t.collection_id + WHERE c.app = $4 + ORDER BY t.updated_at + LIMIT $3 +`, + userID, sinceTime, limit, app) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return convertRowsToTrash(rows) +} + +func (t *TrashRepository) GetFilesWithVersion(userID int64, updateAtTime int64) ([]ente.Trash, error) { + rows, err := t.DB.Query(` + SELECT t.file_id, t.user_id, t.collection_id, cf.encrypted_key, cf.key_decryption_nonce, + f.file_decryption_header, f.thumbnail_decryption_header, f.metadata_decryption_header, + f.encrypted_metadata, f.magic_metadata, f.updation_time, f.info, + t.is_deleted, t.is_restored, t.created_at, t.updated_at, t.delete_by + FROM trash t + JOIN collection_files cf ON t.file_id = cf.file_id AND t.collection_id = cf.collection_id + JOIN files f ON f.file_id = t.file_id + AND t.user_id = $1 + AND t.updated_at = $2`, + userID, updateAtTime) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + return convertRowsToTrash(rows) +} + +func (t *TrashRepository) TrashFiles(fileIDs []int64, userID int64, trash ente.TrashRequest) error { + updationTime := time.Microseconds() + ctx := context.Background() + tx, err := t.DB.BeginTx(ctx, nil) + if err != nil { + return stacktrace.Propagate(err, "") + } + rows, err := tx.QueryContext(ctx, `SELECT DISTINCT collection_id FROM + collection_files WHERE file_id = ANY($1) AND is_deleted = $2`, pq.Array(fileIDs), false) + if err != nil { + return stacktrace.Propagate(err, "") + } + defer rows.Close() + cIDs := make([]int64, 0) + for rows.Next() { + var cID int64 + if err := rows.Scan(&cID); err != nil { + return stacktrace.Propagate(err, "") + } + cIDs = append(cIDs, cID) + } + _, err = tx.ExecContext(ctx, `UPDATE collection_files + SET is_deleted = $1, updation_time = $2 WHERE file_id = ANY($3)`, + true, updationTime, pq.Array(fileIDs)) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + _, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1 + WHERE collection_id = ANY ($2)`, updationTime, pq.Array(cIDs)) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + err = t.InsertItems(ctx, tx, userID, trash.TrashItems) + if err != nil { + tx.Rollback() + return stacktrace.Propagate(err, "") + } + err = tx.Commit() + return stacktrace.Propagate(err, "") +} + +// CleanUpDeletedFilesFromCollection deletes the files from the collection if the files are deleted from the trash +func (t *TrashRepository) CleanUpDeletedFilesFromCollection(ctx context.Context, fileIDs []int64, userID int64) error { + err := t.verifyFilesAreDeleted(ctx, userID, fileIDs) + if err != nil { + return stacktrace.Propagate(err, "deleted files check failed") + } + tx, err := 
t.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	rows, err := tx.QueryContext(ctx, `SELECT DISTINCT collection_id FROM
+	collection_files WHERE file_id = ANY($1) AND is_deleted = $2`, pq.Array(fileIDs), false)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	cIDs := make([]int64, 0)
+	for rows.Next() {
+		var cID int64
+		if err := rows.Scan(&cID); err != nil {
+			return stacktrace.Propagate(err, "")
+		}
+		cIDs = append(cIDs, cID)
+	}
+	updationTime := time.Microseconds()
+	_, err = tx.ExecContext(ctx, `UPDATE collection_files
+	SET is_deleted = $1, updation_time = $2 WHERE file_id = ANY($3)`,
+		true, updationTime, pq.Array(fileIDs))
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	_, err = tx.ExecContext(ctx, `UPDATE collections SET updation_time = $1
+	WHERE collection_id = ANY ($2)`, updationTime, pq.Array(cIDs))
+	if err != nil {
+		tx.Rollback()
+		return stacktrace.Propagate(err, "")
+	}
+	err = tx.Commit()
+	return stacktrace.Propagate(err, "")
+}
+
+func (t *TrashRepository) Delete(ctx context.Context, userID int64, fileIDs []int64) error {
+	if len(fileIDs) > TrashDiffLimit {
+		return fmt.Errorf("cannot delete more than %d in one go", TrashDiffLimit)
+	}
+	// find file_ids from the trash which belong to the user and can be deleted
+	// skip restored and already deleted files
+	fileIDsInTrash, _, err := t.GetFilesInTrashState(ctx, userID, fileIDs)
+	if err != nil {
+		return err
+	}
+	tx, err := t.DB.BeginTx(ctx, nil)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+
+	logrus.WithField("fileIDs", fileIDsInTrash).Info("deleting files")
+	_, err = tx.ExecContext(ctx, `UPDATE trash SET is_deleted= true WHERE file_id = ANY ($1)`, pq.Array(fileIDsInTrash))
+	if err != nil {
+		if rollbackErr := tx.Rollback(); rollbackErr != nil {
+			logrus.WithError(rollbackErr).Error("transaction rollback failed")
+			return stacktrace.Propagate(rollbackErr, "")
+		}
+		return stacktrace.Propagate(err, "")
+	}
+
+	err = t.FileRepo.scheduleDeletion(ctx, tx, fileIDsInTrash, userID)
+	if err != nil {
+		if rollbackErr := tx.Rollback(); rollbackErr != nil {
+			logrus.WithError(rollbackErr).Error("transaction rollback failed")
+			return stacktrace.Propagate(rollbackErr, "")
+		}
+		return stacktrace.Propagate(err, "")
+	}
+	return tx.Commit()
+}
+
+// GetFilesInTrashState returns, for a given userID and fileIDs, the list of fileIDs that are actually present in
+// trash and are not deleted or restored yet.
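+//
+// Illustrative guard (a sketch, not part of this change): callers that must
+// refuse partial operations can use the boolean result:
+//
+//	inTrash, all, err := t.GetFilesInTrashState(ctx, userID, fileIDs)
+//	if err != nil {
+//		return err
+//	}
+//	if !all {
+//		return ente.ErrBadRequest // some of the given files are not eligible
+//	}
+//	// proceed with inTrash only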
+func (t *TrashRepository) GetFilesInTrashState(ctx context.Context, userID int64, fileIDs []int64) ([]int64, bool, error) { + rows, err := t.DB.Query(`SELECT file_id FROM trash + WHERE user_id = $1 AND file_id = ANY ($2) + AND is_deleted = FALSE AND is_restored = FALSE`, userID, pq.Array(fileIDs)) + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + fileIDsInTrash, err := convertRowsToFileId(rows) + if err != nil { + return nil, false, stacktrace.Propagate(err, "") + } + + canRestoreOrDeleteAllFiles := len(fileIDsInTrash) == len(fileIDs) + if !canRestoreOrDeleteAllFiles { + logrus.WithFields(logrus.Fields{ + "user_id": userID, + "input_fileIds": fileIDs, + "trash_fileIds": fileIDsInTrash, + }).Warn("mismatch in input fileIds and fileIDs present in trash") + } + return fileIDsInTrash, canRestoreOrDeleteAllFiles, nil +} + +// verifyFilesAreDeleted for a given userID and fileIDs, this method verifies that given files are actually deleted +func (t *TrashRepository) verifyFilesAreDeleted(ctx context.Context, userID int64, fileIDs []int64) error { + rows, err := t.DB.QueryContext(ctx, `SELECT file_id FROM trash + WHERE user_id = $1 AND file_id = ANY ($2) + AND is_deleted = TRUE AND is_restored = FALSE`, userID, pq.Array(fileIDs)) + if err != nil { + return stacktrace.Propagate(err, "") + } + filesDeleted, err := convertRowsToFileId(rows) + if err != nil { + return stacktrace.Propagate(err, "") + } + + areAllFilesDeleted := len(filesDeleted) == len(fileIDs) + if !areAllFilesDeleted { + logrus.WithFields(logrus.Fields{ + "user_id": userID, + "input_fileIds": fileIDs, + "trash_fileIds": filesDeleted, + }).Error("all file ids are not deleted from trash") + return stacktrace.NewError("all file ids are not deleted from trash") + } + + // get the size of file from object_keys table + row := t.DB.QueryRowContext(ctx, `SELECT coalesce(sum(size),0) FROM object_keys WHERE file_id = ANY($1) and is_deleted = FALSE`, + pq.Array(fileIDs)) + var totalUsage int64 + err = row.Scan(&totalUsage) + if err != nil { + if errors.Is(err, sql.ErrNoRows) { + totalUsage = 0 + } else { + return stacktrace.Propagate(err, "failed to get total usage for fileIDs") + } + } + if totalUsage != 0 { + logrus.WithFields(logrus.Fields{ + "user_id": userID, + "input_fileIds": fileIDs, + "trash_fileIds": filesDeleted, + "total_usage": totalUsage, + }).Error("object_keys table still has entries for deleted files") + return stacktrace.NewError("object_keys table still has entries for deleted files") + } + return nil +} + +// GetFilesIDsForDeletion for given userID and lastUpdateAt timestamp, returns the fileIDs which are in trash and +// where last updated_at before lastUpdateAt timestamp. 
+func (t *TrashRepository) GetFilesIDsForDeletion(userID int64, lastUpdatedAt int64) ([]int64, error) {
+	rows, err := t.DB.Query(`SELECT file_id FROM trash
+	WHERE user_id = $1 AND updated_at <= $2 AND is_deleted = FALSE AND is_restored = FALSE`, userID, lastUpdatedAt)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	fileIDs, err := convertRowsToFileId(rows)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return fileIDs, nil
+}
+
+// GetTimeStampForLatestNonDeletedEntry returns the updated_at timestamp for the latest, non-deleted entry in the trash
+func (t *TrashRepository) GetTimeStampForLatestNonDeletedEntry(userID int64) (*int64, error) {
+	row := t.DB.QueryRow(`SELECT max(updated_at) FROM trash WHERE user_id = $1 AND is_deleted = FALSE AND is_restored = FALSE`, userID)
+	var updatedAt *int64
+	err := row.Scan(&updatedAt)
+	if errors.Is(err, sql.ErrNoRows) {
+		return nil, nil
+	}
+	return updatedAt, stacktrace.Propagate(err, "")
+}
+
+// GetUserIDToFileIDsMapForDeletion returns a map of userID to the fileIDs which should have been deleted by now
+func (t *TrashRepository) GetUserIDToFileIDsMapForDeletion() (map[int64][]int64, error) {
+	rows, err := t.DB.Query(`SELECT user_id, file_id FROM trash
+	WHERE delete_by <= $1 AND is_deleted = FALSE AND is_restored = FALSE limit $2`,
+		time.Microseconds(), TrashDiffLimit)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	result := make(map[int64][]int64, 0)
+	for rows.Next() {
+		var userID, fileID int64
+		if err = rows.Scan(&userID, &fileID); err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+		if fileIDs, ok := result[userID]; ok {
+			result[userID] = append(fileIDs, fileID)
+		} else {
+			result[userID] = []int64{fileID}
+		}
+	}
+	return result, nil
+}
+
+// GetFileIdsForDroppingMetadata retrieves file IDs of deleted files for metadata scrubbing.
+// It returns files that were deleted after the provided timestamp (sinceUpdatedAt) and have been in the trash for at least 50 days.
+// This delay ensures compliance with deletion locks.
+// The method orders the results by the 'updated_at' field in ascending order and limits the results to 'TrashDiffLimit' + 1.
+// If multiple files have the same 'updated_at' timestamp and are at the limit boundary, they are excluded to prevent partial scrubbing.
+//
+// Parameters:
+//
+//	sinceUpdatedAt: The timestamp (in microseconds) to filter files that were deleted after this time.
+//
+// Returns:
+//
+//	A slice of FileWithUpdatedAt: Each item contains a file ID and its corresponding 'updated_at' timestamp.
+//	error: If there is any issue in executing the query, an error is returned.
+//
+// Note: The method returns an empty slice if no matching files are found.
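+//
+// Illustrative pagination loop (a sketch, not part of this change):
+//
+//	since := int64(0)
+//	for {
+//		batch, err := t.GetFileIdsForDroppingMetadata(since)
+//		if err != nil || len(batch) == 0 {
+//			break
+//		}
+//		// scrub metadata for the files in batch ...
+//		since = batch[len(batch)-1].UpdatedAt
+//	}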
+func (t *TrashRepository) GetFileIdsForDroppingMetadata(sinceUpdatedAt int64) ([]FileWithUpdatedAt, error) {
+	rows, err := t.DB.Query(`
+	select file_id, updated_at from trash where is_deleted=true AND updated_at > $1
+AND updated_at < (now_utc_micro_seconds() - (24::BIGINT * 50* 60 * 60 * 1000 * 1000))
+order by updated_at ASC limit $2
+`, sinceUpdatedAt, TrashDiffLimit+1)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	// close rows to avoid leaking the underlying connection
+	defer rows.Close()
+	var fileWithUpdatedAt []FileWithUpdatedAt
+	for rows.Next() {
+		var fileID, updatedAt int64
+		if err = rows.Scan(&fileID, &updatedAt); err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+		fileWithUpdatedAt = append(fileWithUpdatedAt, FileWithUpdatedAt{
+			FileID:    fileID,
+			UpdatedAt: updatedAt,
+		})
+	}
+
+	if len(fileWithUpdatedAt) == 0 {
+		return []FileWithUpdatedAt{}, nil
+	}
+	if len(fileWithUpdatedAt) < TrashDiffLimit {
+		return fileWithUpdatedAt, nil
+	}
+
+	// From the end, drop the entries of fileWithUpdatedAt that share the same updatedAt.
+	// This avoids scrubbing a partial list of files with identical updatedAt: because of
+	// the limit, not all files with that updatedAt are necessarily returned.
+	lastUpdatedAt := fileWithUpdatedAt[len(fileWithUpdatedAt)-1].UpdatedAt
+	var i = len(fileWithUpdatedAt) - 1
+	for ; i >= 0; i-- {
+		if fileWithUpdatedAt[i].UpdatedAt != lastUpdatedAt {
+			// found the index (from the end) where updatedAt differs from the boundary value
+			break
+		}
+	}
+	return fileWithUpdatedAt[0 : i+1], nil
+}
+
+func (t *TrashRepository) EmptyTrash(ctx context.Context, userID int64, lastUpdatedAt int64) error {
+	itemID := fmt.Sprintf("%d%s%d", userID, EmptyTrashQueueItemSeparator, lastUpdatedAt)
+	return t.QueueRepo.InsertItem(ctx, TrashEmptyQueue, itemID)
+}
+
+func convertRowsToTrash(rows *sql.Rows) ([]ente.Trash, error) {
+	defer rows.Close()
+	trashFiles := make([]ente.Trash, 0)
+	for rows.Next() {
+		var (
+			trash ente.Trash
+		)
+		err := rows.Scan(&trash.File.ID, &trash.File.OwnerID, &trash.File.CollectionID, &trash.File.EncryptedKey, &trash.File.KeyDecryptionNonce,
+			&trash.File.File.DecryptionHeader, &trash.File.Thumbnail.DecryptionHeader, &trash.File.Metadata.DecryptionHeader,
+			&trash.File.Metadata.EncryptedData, &trash.File.MagicMetadata, &trash.File.UpdationTime, &trash.File.Info, &trash.IsDeleted, &trash.IsRestored,
+			&trash.CreatedAt, &trash.UpdatedAt, &trash.DeleteBy)
+		if err != nil {
+			return trashFiles, stacktrace.Propagate(err, "")
+		}
+
+		trashFiles = append(trashFiles, trash)
+	}
+	return trashFiles, nil
+}
diff --git a/server/pkg/repo/twofactor.go b/server/pkg/repo/twofactor.go
new file mode 100644
index 000000000..fdbe228a1
--- /dev/null
+++ b/server/pkg/repo/twofactor.go
@@ -0,0 +1,126 @@
+package repo
+
+import (
+	"database/sql"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/crypto"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+)
+
+type TwoFactorRepository struct {
+	DB                  *sql.DB
+	SecretEncryptionKey []byte
+}
+
+// GetTwoFactorSecret gets the user's two factor secret
+func (repo *TwoFactorRepository) GetTwoFactorSecret(userID int64) (string, error) {
+	var encryptedTwoFASecret, nonce []byte
+	row := repo.DB.QueryRow(`SELECT encrypted_two_factor_secret, two_factor_secret_decryption_nonce FROM two_factor WHERE user_id = $1`, userID)
+	err := row.Scan(&encryptedTwoFASecret, &nonce)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	twoFASecret, err := crypto.Decrypt(encryptedTwoFASecret, repo.SecretEncryptionKey, nonce)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return twoFASecret, nil
+}
+
+// UpdateTwoFactorStatus activates/deactivates a user's two factor
+func (repo *TwoFactorRepository) UpdateTwoFactorStatus(userID int64, status bool) error {
+	_, err := repo.DB.Exec(`UPDATE users SET is_two_factor_enabled = $1 WHERE user_id = $2`, status, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// AddTwoFactorSession adds a new two factor session for a user
+func (repo *TwoFactorRepository) AddTwoFactorSession(userID int64, sessionID string, expirationTime int64) error {
+	_, err := repo.DB.Exec(`INSERT INTO two_factor_sessions(user_id, session_id, creation_time, expiration_time) VALUES($1, $2, $3, $4)`,
+		userID, sessionID, time.Microseconds(), expirationTime)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveExpiredTwoFactorSessions removes all two factor sessions that have expired
+func (repo *TwoFactorRepository) RemoveExpiredTwoFactorSessions() error {
+	_, err := repo.DB.Exec(`DELETE FROM two_factor_sessions WHERE expiration_time <= $1`,
+		time.Microseconds())
+	return stacktrace.Propagate(err, "")
+}
+
+// GetUserIDWithTwoFactorSession returns the userID associated with a given session
+func (repo *TwoFactorRepository) GetUserIDWithTwoFactorSession(sessionID string) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT user_id FROM two_factor_sessions WHERE session_id = $1 AND expiration_time > $2`, sessionID, time.Microseconds())
+	var id int64
+	err := row.Scan(&id)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return id, nil
+}
+
+// GetRecoveryKeyEncryptedTwoFactorSecret gets the user's two factor secret encrypted with the recovery key
+func (repo *TwoFactorRepository) GetRecoveryKeyEncryptedTwoFactorSecret(userID int64) (ente.TwoFactorRecoveryResponse, error) {
+	var response ente.TwoFactorRecoveryResponse
+	row := repo.DB.QueryRow(`SELECT recovery_encrypted_two_factor_secret, recovery_two_factor_secret_decryption_nonce FROM two_factor WHERE user_id = $1`, userID)
+	err := row.Scan(&response.EncryptedSecret, &response.SecretDecryptionNonce)
+	if err != nil {
+		return ente.TwoFactorRecoveryResponse{}, stacktrace.Propagate(err, "")
+	}
+	return response, nil
+}
+
+// VerifyTwoFactorSecret verifies if a two factor secret belongs to a user
+func (repo *TwoFactorRepository) VerifyTwoFactorSecret(userID int64, secretHash string) (bool, error) {
+	var exists bool
+	row := repo.DB.QueryRow(`SELECT EXISTS( SELECT 1 FROM two_factor WHERE user_id = $1 AND two_factor_secret_hash = $2)`, userID, secretHash)
+	err := row.Scan(&exists)
+	if err != nil {
+		return false, stacktrace.Propagate(err, "")
+	}
+	return exists, nil
+}
+
+// SetTempTwoFactorSecret sets the two factor secret for a user when they try to set up a new two-factor app
+func (repo *TwoFactorRepository) SetTempTwoFactorSecret(userID int64, secret ente.EncryptionResult, secretHash string, expirationTime int64) error {
+	_, err := repo.DB.Exec(`INSERT INTO temp_two_factor(user_id, encrypted_two_factor_secret, two_factor_secret_decryption_nonce, two_factor_secret_hash, creation_time, expiration_time)
+	VALUES($1, $2, $3, $4, $5, $6)`,
+		userID, secret.Cipher, secret.Nonce, secretHash, time.Microseconds(), expirationTime)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetTempTwoFactorSecret gets the user's two factor secrets for validating and enabling a new two-factor configuration
+func (repo *TwoFactorRepository) GetTempTwoFactorSecret(userID int64) ([]ente.EncryptionResult, []string, error) {
+	rows, err := repo.DB.Query(`SELECT encrypted_two_factor_secret, two_factor_secret_decryption_nonce, two_factor_secret_hash FROM temp_two_factor WHERE user_id = $1 AND expiration_time > $2`, userID, time.Microseconds())
+	if err != nil {
+		return make([]ente.EncryptionResult, 0), make([]string, 0), stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	encryptedSecrets := make([]ente.EncryptionResult, 0)
+	hashedSecrets := make([]string, 0)
+	for rows.Next() {
+		var encryptedTwoFASecret ente.EncryptionResult
+		var secretHash string
+		err := rows.Scan(&encryptedTwoFASecret.Cipher, &encryptedTwoFASecret.Nonce, &secretHash)
+		if err != nil {
+			return make([]ente.EncryptionResult, 0), make([]string, 0), stacktrace.Propagate(err, "")
+		}
+		encryptedSecrets = append(encryptedSecrets, encryptedTwoFASecret)
+		hashedSecrets = append(hashedSecrets, secretHash)
+	}
+	return encryptedSecrets, hashedSecrets, nil
+}
+
+// RemoveTempTwoFactorSecret removes the specified secret with hash value `secretHash`
+func (repo *TwoFactorRepository) RemoveTempTwoFactorSecret(secretHash string) error {
+	_, err := repo.DB.Exec(`DELETE FROM temp_two_factor WHERE two_factor_secret_hash = $1`, secretHash)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveExpiredTempTwoFactorSecrets removes all temp two factor secrets that have expired
+func (repo *TwoFactorRepository) RemoveExpiredTempTwoFactorSecrets() error {
+	_, err := repo.DB.Exec(`DELETE FROM temp_two_factor WHERE expiration_time <= $1`,
+		time.Microseconds())
+	return stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/usage.go b/server/pkg/repo/usage.go
new file mode 100644
index 000000000..c1e4cdaa3
--- /dev/null
+++ b/server/pkg/repo/usage.go
@@ -0,0 +1,66 @@
+package repo
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+
+	"github.com/ente-io/stacktrace"
+	"github.com/lib/pq"
+)
+
+// UsageRepository defines the methods for tracking and fetching usage related data
+type UsageRepository struct {
+	DB       *sql.DB
+	UserRepo *UserRepository
+}
+
+// GetUsage gets the storage usage of a user
+func (repo *UsageRepository) GetUsage(userID int64) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT storage_consumed FROM usage WHERE user_id = $1`,
+		userID)
+	var usage int64
+	err := row.Scan(&usage)
+	if errors.Is(err, sql.ErrNoRows) {
+		return 0, nil
+	}
+	return usage, stacktrace.Propagate(err, "")
+}
+
+// Create inserts a new usage entry for the given user. If an entry already exists, it does nothing.
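+//
+// Illustrative call site (a sketch, not part of this change): invoked once
+// when provisioning an account so that later reads can assume a row exists:
+//
+//	if err := usageRepo.Create(newUserID); err != nil {
+//		return err
+//	}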
+func (repo *UsageRepository) Create(userID int64) error {
+	_, err := repo.DB.Exec(`INSERT INTO usage(user_id, storage_consumed) VALUES ($1,$2) ON CONFLICT DO NOTHING;`,
+		userID, //$1 user_id
+		0,      // $2 initial value for storage consumed
+	)
+	return stacktrace.Propagate(err, "failed to insert/update")
+}
+
+// GetCombinedUsage gets the sum of storage usage of the given list of userIDs
+func (repo *UsageRepository) GetCombinedUsage(ctx context.Context, userIDs []int64) (int64, error) {
+	row := repo.DB.QueryRowContext(ctx, `SELECT coalesce(sum(storage_consumed),0) FROM usage WHERE user_id = ANY($1)`,
+		pq.Array(userIDs))
+	var totalUsage int64
+	err := row.Scan(&totalUsage)
+	if errors.Is(err, sql.ErrNoRows) {
+		return 0, nil
+	}
+	return totalUsage, stacktrace.Propagate(err, "")
+}
+
+// StorageForFamilyAdmin calculates the total storage consumed by the family for a given adminID
+func (repo *UsageRepository) StorageForFamilyAdmin(adminID int64) (int64, error) {
+	query := `
+		SELECT COALESCE(SUM(storage_consumed), 0)
+		FROM users
+		LEFT JOIN families ON users.family_admin_id = families.admin_id AND families.status IN ('SELF', 'ACCEPTED')
+		LEFT JOIN usage ON families.member_id = usage.user_id
+		WHERE users.user_id = $1
+	`
+	var totalStorage int64
+	err := repo.DB.QueryRow(query, adminID).Scan(&totalStorage)
+	if errors.Is(err, sql.ErrNoRows) {
+		return 0, nil
+	}
+	return totalStorage, stacktrace.Propagate(err, "")
+}
diff --git a/server/pkg/repo/user.go b/server/pkg/repo/user.go
new file mode 100644
index 000000000..596d24c64
--- /dev/null
+++ b/server/pkg/repo/user.go
@@ -0,0 +1,398 @@
+package repo
+
+import (
+	"context"
+	"database/sql"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/ente-io/museum/pkg/repo/passkey"
+	storageBonusRepo "github.com/ente-io/museum/pkg/repo/storagebonus"
+	"github.com/ente-io/stacktrace"
+	"github.com/lib/pq"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/crypto"
+	"github.com/ente-io/museum/pkg/utils/time"
+)
+
+const (
+	// Format for updated email_hash once the account is deleted
+	DELETED_EMAIL_HASH_FORMAT = "deleted+%d@ente.io"
+)
+
+// UserRepository defines the methods for inserting, updating and retrieving
+// user entities from the underlying repository
+type UserRepository struct {
+	DB                  *sql.DB
+	SecretEncryptionKey []byte
+	HashingKey          []byte
+	StorageBonusRepo    *storageBonusRepo.Repository
+	PasskeysRepository  *passkey.Repository
+}
+
+// Get returns a user indicated by the userID
+func (repo *UserRepository) Get(userID int64) (ente.User, error) {
+	var user ente.User
+	var encryptedEmail, nonce []byte
+	row := repo.DB.QueryRow(`SELECT user_id, encrypted_email, email_decryption_nonce, email_hash, family_admin_id, creation_time, is_two_factor_enabled, email_mfa FROM users WHERE user_id = $1`, userID)
+	err := row.Scan(&user.ID, &encryptedEmail, &nonce, &user.Hash, &user.FamilyAdminID, &user.CreationTime, &user.IsTwoFactorEnabled, &user.IsEmailMFAEnabled)
+	if err != nil {
+		return ente.User{}, stacktrace.Propagate(err, "")
+	}
+	// We should not be calling Get for a deleted account. The one valid
+	// use case is internal/admin APIs, where we should instead be
+	// using GetUserByIDInternal.
+	if strings.EqualFold(user.Hash, fmt.Sprintf(DELETED_EMAIL_HASH_FORMAT, userID)) {
+		return user, stacktrace.Propagate(ente.ErrUserDeleted, fmt.Sprintf("user account is deleted %d", userID))
+	}
+	email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce)
+	if err != nil {
+		return ente.User{}, stacktrace.Propagate(err, "")
+	}
+	user.Email = email
+	return user, nil
+}
+
+// GetUserByIDInternal returns a user indicated by the id. Strictly use this method for internal APIs only.
+func (repo *UserRepository) GetUserByIDInternal(id int64) (ente.User, error) {
+	var user ente.User
+	var encryptedEmail, nonce []byte
+	row := repo.DB.QueryRow(`SELECT user_id, encrypted_email, email_decryption_nonce, email_hash, family_admin_id, creation_time FROM users WHERE user_id = $1 AND encrypted_email IS NOT NULL`, id)
+	err := row.Scan(&user.ID, &encryptedEmail, &nonce, &user.Hash, &user.FamilyAdminID, &user.CreationTime)
+	if err != nil {
+		return ente.User{}, stacktrace.Propagate(err, "")
+	}
+	email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce)
+	if err != nil {
+		return ente.User{}, stacktrace.Propagate(err, "")
+	}
+	user.Email = email
+	return user, nil
+}
+
+// Delete removes the email_hash and encrypted email information for the user. It replaces email_hash with a placeholder value
+// based on DELETED_EMAIL_HASH_FORMAT
+func (repo *UserRepository) Delete(userID int64) error {
+	emailHash := fmt.Sprintf(DELETED_EMAIL_HASH_FORMAT, userID)
+	_, err := repo.DB.Exec(`UPDATE users SET encrypted_email = null, email_decryption_nonce = null, email_hash = $1 WHERE user_id = $2`, emailHash, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetFamilyAdminID returns the *familyAdminID for the given userID
+func (repo *UserRepository) GetFamilyAdminID(userID int64) (*int64, error) {
+	row := repo.DB.QueryRow(`SELECT family_admin_id FROM users WHERE user_id = $1`, userID)
+	var familyAdminID *int64
+	err := row.Scan(&familyAdminID)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	return familyAdminID, nil
+}
+
+// GetUserByEmailHash returns a user indicated by the emailHash
+func (repo *UserRepository) GetUserByEmailHash(emailHash string) (ente.User, error) {
+	var user ente.User
+	row := repo.DB.QueryRow(`SELECT user_id, email_hash, creation_time FROM users WHERE email_hash = $1`, emailHash)
+	err := row.Scan(&user.ID, &user.Hash, &user.CreationTime)
+	if err != nil {
+		return ente.User{}, stacktrace.Propagate(err, "")
+	}
+	return user, nil
+}
+
+// GetAll returns all users between sinceTime and tillTime (exclusive).
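+//
+// Illustrative usage (a sketch, not part of this change): both bounds are
+// epoch microseconds, e.g. listing accounts created in the last 24 hours:
+//
+//	now := time.Microseconds()
+//	users, err := repo.GetAll(now-24*60*60*1000*1000, now)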
+func (repo *UserRepository) GetAll(sinceTime int64, tillTime int64) ([]ente.User, error) { + rows, err := repo.DB.Query(`SELECT user_id, encrypted_email, email_decryption_nonce, email_hash, creation_time FROM users WHERE creation_time > $1 AND creation_time < $2 AND encrypted_email IS NOT NULL ORDER BY creation_time`, sinceTime, tillTime) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + users := make([]ente.User, 0) + for rows.Next() { + var user ente.User + var encryptedEmail, nonce []byte + err := rows.Scan(&user.ID, &encryptedEmail, &nonce, &user.Hash, &user.CreationTime) + + if err != nil { + return users, stacktrace.Propagate(err, "") + } + email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + user.Email = email + users = append(users, user) + } + return users, nil +} + +// GetUserUsageWithSubData will return current storage usage & basic information about subscription for given list +// of users. It's primarily used for fetching storage utilisation for a family/group of users +func (repo *UserRepository) GetUserUsageWithSubData(ctx context.Context, userIds []int64) ([]ente.UserUsageWithSubData, error) { + rows, err := repo.DB.QueryContext(ctx, `select encrypted_email, email_decryption_nonce, u.user_id, coalesce(storage_consumed , 0) as storage_used, storage, expiry_time + from users as u + left join (select storage_consumed, user_id from usage where user_id = ANY($1)) as us + on us.user_id=u.user_id + left join (select user_id,expiry_time, storage from subscriptions where user_id = ANY($1)) as s + on s.user_id = u.user_id + where u.user_id = ANY($1)`, pq.Array(userIds)) + if err != nil { + return nil, stacktrace.Propagate(err, "") + } + defer rows.Close() + result := make([]ente.UserUsageWithSubData, 0) + for rows.Next() { + var ( + usageData ente.UserUsageWithSubData + encryptedEmail, nonce []byte + ) + err = rows.Scan(&encryptedEmail, &nonce, &usageData.UserID, &usageData.StorageConsumed, &usageData.Storage, &usageData.ExpiryTime) + if err != nil { + return result, stacktrace.Propagate(err, "") + } + email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce) + if err != nil { + return nil, stacktrace.Propagate(err, "failed to decrypt email") + } + usageData.Email = &email + result = append(result, usageData) + } + return result, nil +} + +// Create creates a user with a given email address and returns the generated +// userID +func (repo *UserRepository) Create(encryptedEmail ente.EncryptionResult, emailHash string, source *string) (int64, error) { + var userID int64 + err := repo.DB.QueryRow(`INSERT INTO users(encrypted_email, email_decryption_nonce, email_hash, creation_time, source) VALUES($1, $2, $3, $4, $5) RETURNING user_id`, + encryptedEmail.Cipher, encryptedEmail.Nonce, emailHash, time.Microseconds(), source).Scan(&userID) + if err != nil { + return -1, stacktrace.Propagate(err, "") + } + return userID, nil +} + +// UpdateDeleteFeedback for a given user in the delete_feedback column of type jsonb +func (repo *UserRepository) UpdateDeleteFeedback(userID int64, feedback map[string]string) error { + // Convert the feedback map into JSON + feedbackJSON, err := json.Marshal(feedback) + if err != nil { + return stacktrace.Propagate(err, "Failed to marshal feedback into JSON") + } + // Execute the update query with the JSON + _, err = repo.DB.Exec(`UPDATE users SET delete_feedback = $1 WHERE user_id = $2`, feedbackJSON, userID) + 
+	return stacktrace.Propagate(err, "Failed to update delete feedback")
+}
+
+// UpdateEmail updates the email address of a user
+func (repo *UserRepository) UpdateEmail(userID int64, encryptedEmail ente.EncryptionResult, emailHash string) error {
+	_, err := repo.DB.Exec(`UPDATE users SET encrypted_email = $1, email_decryption_nonce = $2, email_hash = $3 WHERE user_id = $4`, encryptedEmail.Cipher, encryptedEmail.Nonce, emailHash, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetUserIDWithEmail returns the userID associated with a provided email
+func (repo *UserRepository) GetUserIDWithEmail(email string) (int64, error) {
+	trimmedEmail := strings.TrimSpace(email)
+	emailHash, err := crypto.GetHash(trimmedEmail, repo.HashingKey)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	row := repo.DB.QueryRow(`SELECT user_id FROM users WHERE email_hash = $1`, emailHash)
+	var userID int64
+	err = row.Scan(&userID)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return userID, nil
+}
+
+// GetKeyAttributes gets the key attributes for a given user
+func (repo *UserRepository) GetKeyAttributes(userID int64) (ente.KeyAttributes, error) {
+	row := repo.DB.QueryRow(`SELECT kek_salt, kek_hash_bytes, encrypted_key, key_decryption_nonce, public_key, encrypted_secret_key, secret_key_decryption_nonce, mem_limit, ops_limit, master_key_encrypted_with_recovery_key, master_key_decryption_nonce, recovery_key_encrypted_with_master_key, recovery_key_decryption_nonce FROM key_attributes WHERE user_id = $1`, userID)
+	var (
+		keyAttributes                     ente.KeyAttributes
+		kekHashBytes                      []byte
+		masterKeyEncryptedWithRecoveryKey sql.NullString
+		masterKeyDecryptionNonce          sql.NullString
+		recoveryKeyEncryptedWithMasterKey sql.NullString
+		recoveryKeyDecryptionNonce        sql.NullString
+	)
+	err := row.Scan(&keyAttributes.KEKSalt,
+		&kekHashBytes,
+		&keyAttributes.EncryptedKey,
+		&keyAttributes.KeyDecryptionNonce,
+		&keyAttributes.PublicKey,
+		&keyAttributes.EncryptedSecretKey,
+		&keyAttributes.SecretKeyDecryptionNonce,
+		&keyAttributes.MemLimit,
+		&keyAttributes.OpsLimit,
+		&masterKeyEncryptedWithRecoveryKey,
+		&masterKeyDecryptionNonce,
+		&recoveryKeyEncryptedWithMasterKey,
+		&recoveryKeyDecryptionNonce,
+	)
+	if err != nil {
+		return ente.KeyAttributes{}, stacktrace.Propagate(err, "")
+	}
+	keyAttributes.KEKHash = string(kekHashBytes)
+	if masterKeyEncryptedWithRecoveryKey.Valid {
+		keyAttributes.MasterKeyEncryptedWithRecoveryKey = masterKeyEncryptedWithRecoveryKey.String
+	}
+	if masterKeyDecryptionNonce.Valid {
+		keyAttributes.MasterKeyDecryptionNonce = masterKeyDecryptionNonce.String
+	}
+	if recoveryKeyEncryptedWithMasterKey.Valid {
+		keyAttributes.RecoveryKeyEncryptedWithMasterKey = recoveryKeyEncryptedWithMasterKey.String
+	}
+	if recoveryKeyDecryptionNonce.Valid {
+		keyAttributes.RecoveryKeyDecryptionNonce = recoveryKeyDecryptionNonce.String
+	}
+
+	return keyAttributes, nil
+}
+
+// SetKeyAttributes sets the key attributes for a given user
+func (repo *UserRepository) SetKeyAttributes(userID int64, keyAttributes ente.KeyAttributes) error {
+	_, err := repo.DB.Exec(`INSERT INTO key_attributes(user_id, kek_salt, kek_hash_bytes, encrypted_key, key_decryption_nonce, public_key, encrypted_secret_key, secret_key_decryption_nonce, mem_limit, ops_limit, master_key_encrypted_with_recovery_key, master_key_decryption_nonce, recovery_key_encrypted_with_master_key, recovery_key_decryption_nonce) VALUES($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14)`,
+		userID, keyAttributes.KEKSalt,
+		[]byte(keyAttributes.KEKHash),
+		keyAttributes.EncryptedKey, keyAttributes.KeyDecryptionNonce,
+		keyAttributes.PublicKey, keyAttributes.EncryptedSecretKey,
+		keyAttributes.SecretKeyDecryptionNonce, keyAttributes.MemLimit, keyAttributes.OpsLimit,
+		keyAttributes.MasterKeyEncryptedWithRecoveryKey, keyAttributes.MasterKeyDecryptionNonce,
+		keyAttributes.RecoveryKeyEncryptedWithMasterKey, keyAttributes.RecoveryKeyDecryptionNonce)
+	return stacktrace.Propagate(err, "")
+}
+
+// UpdateKeys sets the keys of a user
+func (repo *UserRepository) UpdateKeys(userID int64, keys ente.UpdateKeysRequest) error {
+	_, err := repo.DB.Exec(`UPDATE key_attributes SET kek_salt = $1, encrypted_key = $2, key_decryption_nonce = $3, mem_limit = $4, ops_limit = $5 WHERE user_id = $6`,
+		keys.KEKSalt, keys.EncryptedKey, keys.KeyDecryptionNonce, keys.MemLimit, keys.OpsLimit, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// SetRecoveryKeyAttributes sets the recovery key and related attributes for a user
+func (repo *UserRepository) SetRecoveryKeyAttributes(userID int64, keys ente.SetRecoveryKeyRequest) error {
+	_, err := repo.DB.Exec(`UPDATE key_attributes SET master_key_encrypted_with_recovery_key = $1, master_key_decryption_nonce = $2, recovery_key_encrypted_with_master_key = $3, recovery_key_decryption_nonce = $4 WHERE user_id = $5`,
+		keys.MasterKeyEncryptedWithRecoveryKey, keys.MasterKeyDecryptionNonce, keys.RecoveryKeyEncryptedWithMasterKey, keys.RecoveryKeyDecryptionNonce, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetPublicKey returns the public key of a user
+func (repo *UserRepository) GetPublicKey(userID int64) (string, error) {
+	row := repo.DB.QueryRow(`SELECT public_key FROM key_attributes WHERE user_id = $1`, userID)
+	var publicKey string
+	err := row.Scan(&publicKey)
+	return publicKey, stacktrace.Propagate(err, "")
+}
+
+// GetUsersWithIndividualPlanWhoHaveExceededStorageQuota returns the list of users who have exceeded their
+// storage quota and are not part of any family plan
+func (repo *UserRepository) GetUsersWithIndividualPlanWhoHaveExceededStorageQuota() ([]ente.User, error) {
+	rows, err := repo.DB.Query(`
+		SELECT users.user_id, users.encrypted_email, users.email_decryption_nonce, users.email_hash, usage.storage_consumed, subscriptions.storage
+		FROM users
+			INNER JOIN usage
+				ON users.user_id = usage.user_id
+			INNER JOIN subscriptions
+				ON users.user_id = subscriptions.user_id AND usage.storage_consumed > subscriptions.storage AND users.encrypted_email IS NOT NULL AND users.family_admin_id IS NULL;
+	`)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	refBonus, addOnBonus, bonusErr := repo.StorageBonusRepo.GetAllUsersSurplusBonus(context.Background())
+	if bonusErr != nil {
+		return nil, stacktrace.Propagate(bonusErr, "failed to fetch bonusInfo")
+	}
+	defer rows.Close()
+	users := make([]ente.User, 0)
+	for rows.Next() {
+		var user ente.User
+		var encryptedEmail, nonce []byte
+		var storageConsumed, subStorage int64
+		err := rows.Scan(&user.ID, &encryptedEmail, &nonce, &user.Hash, &storageConsumed, &subStorage)
+		if err != nil {
+			return users, stacktrace.Propagate(err, "")
+		}
+		// ignore deleted users
+		if strings.EqualFold(user.Hash, fmt.Sprintf(DELETED_EMAIL_HASH_FORMAT, user.ID)) || len(encryptedEmail) == 0 {
+			continue
+		}
+		if refBonusStorage, ok := refBonus[user.ID]; ok {
+			addOnBonusStorage := addOnBonus[user.ID]
+			// cap usable ref bonus to the subscription storage + addOnBonus
+			if refBonusStorage > (subStorage + addOnBonusStorage) {
+				refBonusStorage = subStorage + addOnBonusStorage
+			}
+			if (storageConsumed) <= (subStorage + refBonusStorage + addOnBonusStorage) {
+				continue
+			}
+		}
+		email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce)
+		if err != nil {
+			return users, stacktrace.Propagate(err, "")
+		}
+		user.Email = email
+		users = append(users, user)
+	}
+	return users, nil
+}
+
+// SetTwoFactorSecret sets the two factor secret for a user
+func (repo *UserRepository) SetTwoFactorSecret(userID int64, secret ente.EncryptionResult, secretHash string, recoveryEncryptedTwoFactorSecret string, recoveryTwoFactorSecretDecryptionNonce string) error {
+	_, err := repo.DB.Exec(`INSERT INTO two_factor(user_id,encrypted_two_factor_secret,two_factor_secret_decryption_nonce,two_factor_secret_hash,recovery_encrypted_two_factor_secret,recovery_two_factor_secret_decryption_nonce)
+		VALUES($1, $2, $3, $4, $5, $6)
+		ON CONFLICT (user_id) DO UPDATE
+			SET encrypted_two_factor_secret = $2,
+				two_factor_secret_decryption_nonce = $3,
+				two_factor_secret_hash = $4,
+				recovery_encrypted_two_factor_secret = $5,
+				recovery_two_factor_secret_decryption_nonce = $6
+		`,
+		userID, secret.Cipher, secret.Nonce, secretHash, recoveryEncryptedTwoFactorSecret, recoveryTwoFactorSecretDecryptionNonce)
+	return stacktrace.Propagate(err, "")
+}
+
+// IsTwoFactorEnabled checks if a user's two factor is enabled or not
+func (repo *UserRepository) IsTwoFactorEnabled(userID int64) (bool, error) {
+	var twoFAStatus bool
+	row := repo.DB.QueryRow(`SELECT is_two_factor_enabled FROM users WHERE user_id = $1`, userID)
+	err := row.Scan(&twoFAStatus)
+	if err != nil {
+		return false, stacktrace.Propagate(err, "")
+	}
+	return twoFAStatus, nil
+}
+
+func (repo *UserRepository) HasPasskeys(userID int64) (hasPasskeys bool, err error) {
+	passkeys, err := repo.PasskeysRepository.GetUserPasskeys(userID)
+	hasPasskeys = len(passkeys) > 0
+	return
+}
+
+func (repo *UserRepository) GetEmailsFromHashes(hashes []string) ([]string, error) {
+	rows, err := repo.DB.Query(`
+		SELECT users.encrypted_email, users.email_decryption_nonce
+		FROM users
+		WHERE users.email_hash = ANY($1);
+	`, pq.Array(hashes))
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	emails := make([]string, 0)
+	for rows.Next() {
+		var encryptedEmail, nonce []byte
+		err := rows.Scan(&encryptedEmail, &nonce)
+		if err != nil {
+			return emails, stacktrace.Propagate(err, "")
+		}
+		email, err := crypto.Decrypt(encryptedEmail, repo.SecretEncryptionKey, nonce)
+		if err != nil {
+			return emails, stacktrace.Propagate(err, "")
+		}
+		emails = append(emails, email)
+	}
+	return emails, nil
+}
diff --git a/server/pkg/repo/userauth.go b/server/pkg/repo/userauth.go
new file mode 100644
index 000000000..c5f86e8ec
--- /dev/null
+++ b/server/pkg/repo/userauth.go
@@ -0,0 +1,174 @@
+package repo
+
+import (
+	"database/sql"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/network"
+
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/ente-io/stacktrace"
+)
+
+// UserAuthRepository defines the methods for inserting, updating and retrieving
+// one time tokens (currently) used for email verification.
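+//
+// A minimal sketch of wiring it up (db, emailHash and ott are assumed to be
+// supplied by the caller):
+//
+//	repo := &UserAuthRepository{DB: db}
+//	err := repo.AddOTT(emailHash, ente.Photos, ott, time.MicrosecondsAfterHours(1))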
+type UserAuthRepository struct {
+	DB *sql.DB
+}
+
+// AddOTT saves the provided one time token for the specified user
+func (repo *UserAuthRepository) AddOTT(emailHash string, app ente.App, ott string, expirationTime int64) error {
+	_, err := repo.DB.Exec(`INSERT INTO otts(email_hash, ott, creation_time, expiration_time, app)
+		VALUES($1, $2, $3, $4, $5)
+		ON CONFLICT ON CONSTRAINT unique_otts_emailhash_ott DO UPDATE SET creation_time = $3, expiration_time = $4`,
+		emailHash, ott, time.Microseconds(), expirationTime, app)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveOTT removes the specified OTT (to be used when an OTT has been consumed)
+func (repo *UserAuthRepository) RemoveOTT(emailHash string, ott string) error {
+	_, err := repo.DB.Exec(`DELETE FROM otts WHERE email_hash = $1 AND ott = $2`, emailHash, ott)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveExpiredOTTs removes all OTTs that have expired
+func (repo *UserAuthRepository) RemoveExpiredOTTs() error {
+	_, err := repo.DB.Exec(`DELETE FROM otts WHERE expiration_time <= $1`,
+		time.Microseconds())
+	return stacktrace.Propagate(err, "")
+}
+
+// GetTokenCreationTime returns the creation_time for the given token
+func (repo *UserAuthRepository) GetTokenCreationTime(token string) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT creation_time from tokens where token = $1`, token)
+	var result int64
+	if err := row.Scan(&result); err != nil {
+		return 0, stacktrace.Propagate(err, "Failed to scan row")
+	}
+	return result, nil
+}
+
+// GetValidOTTs returns the list of OTTs that haven't expired for a given user
+func (repo *UserAuthRepository) GetValidOTTs(emailHash string, app ente.App) ([]string, error) {
+	rows, err := repo.DB.Query(`SELECT ott FROM otts WHERE email_hash = $1 AND app = $2 AND expiration_time > $3`,
+		emailHash, app, time.Microseconds())
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	otts := make([]string, 0)
+	for rows.Next() {
+		var ott string
+		err := rows.Scan(&ott)
+		if err != nil {
+			return otts, stacktrace.Propagate(err, "")
+		}
+		otts = append(otts, ott)
+	}
+
+	return otts, nil
+}
+
+func (repo *UserAuthRepository) GetMaxWrongAttempts(emailHash string) (int, error) {
+	row := repo.DB.QueryRow(`SELECT COALESCE(MAX(wrong_attempt),0) FROM otts WHERE email_hash = $1 AND expiration_time > $2`,
+		emailHash, time.Microseconds())
+	var wrongAttempt int
+	if err := row.Scan(&wrongAttempt); err != nil {
+		return 0, stacktrace.Propagate(err, "Failed to scan row")
+	}
+	return wrongAttempt, nil
+}
+
+// RecordWrongAttemptForActiveOtt increases the wrong_attempt count for the given emailHash's active OTTs.
+// Assuming that we keep deleting expired OTTs, max(wrong_attempt) can be used to track brute-force attempts.
+func (repo *UserAuthRepository) RecordWrongAttemptForActiveOtt(emailHash string) error {
+	_, err := repo.DB.Exec(`UPDATE otts SET wrong_attempt = otts.wrong_attempt + 1
+		WHERE email_hash = $1 AND expiration_time > $2`, emailHash, time.Microseconds())
+	if err != nil {
+		return stacktrace.Propagate(err, "Failed to update wrong attempt count")
+	}
+	return nil
+}
+
+// AddToken saves the provided long lived token for the specified user
+func (repo *UserAuthRepository) AddToken(userID int64, app ente.App, token string, ip string, userAgent string) error {
+	_, err := repo.DB.Exec(`INSERT INTO tokens(user_id, app, token, creation_time, ip, user_agent) VALUES($1, $2, $3, $4, $5, $6)`,
+		userID, app, token, time.Microseconds(), ip, userAgent)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetUserIDWithToken returns the userID associated with a given token
+func (repo *UserAuthRepository) GetUserIDWithToken(token string, app ente.App) (int64, error) {
+	row := repo.DB.QueryRow(`SELECT user_id FROM tokens WHERE token = $1 AND app = $2 AND is_deleted = false`, token, app)
+	var id int64
+	err := row.Scan(&id)
+	if err != nil {
+		return -1, stacktrace.Propagate(err, "")
+	}
+	return id, nil
+}
+
+// RemoveToken marks the specified token as deleted (to be used when a user logs out)
+func (repo *UserAuthRepository) RemoveToken(userID int64, token string) error {
+	_, err := repo.DB.Exec(`UPDATE tokens SET is_deleted = true WHERE user_id = $1 AND token = $2`,
+		userID, token)
+	return stacktrace.Propagate(err, "")
+}
+
+// UpdateLastUsedAt updates the last used at timestamp for the particular token
+func (repo *UserAuthRepository) UpdateLastUsedAt(userID int64, token string, ip string, userAgent string) error {
+	_, err := repo.DB.Exec(`UPDATE tokens SET ip = $1, user_agent = $2, last_used_at = $3 WHERE user_id = $4 AND token = $5`,
+		ip, userAgent, time.Microseconds(), userID, token)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveAllOtherTokens marks all tokens apart from the specified one for a user as deleted
+func (repo *UserAuthRepository) RemoveAllOtherTokens(userID int64, token string) error {
+	_, err := repo.DB.Exec(`UPDATE tokens SET is_deleted = true WHERE user_id = $1 AND token <> $2`,
+		userID, token)
+	return stacktrace.Propagate(err, "")
+}
+
+func (repo *UserAuthRepository) RemoveDeletedTokens(expiryTime int64) error {
+	_, err := repo.DB.Exec(`DELETE FROM tokens WHERE is_deleted = true AND last_used_at < $1`, expiryTime)
+	return stacktrace.Propagate(err, "")
+}
+
+// RemoveAllTokens marks all tokens for a user as deleted
+func (repo *UserAuthRepository) RemoveAllTokens(userID int64) error {
+	_, err := repo.DB.Exec(`UPDATE tokens SET is_deleted = true WHERE user_id = $1`, userID)
+	return stacktrace.Propagate(err, "")
+}
+
+// GetActiveSessions returns the list of tokens that are valid for a given user
+func (repo *UserAuthRepository) GetActiveSessions(userID int64, app ente.App) ([]ente.Session, error) {
+	rows, err := repo.DB.Query(`SELECT token, creation_time, ip, user_agent, last_used_at FROM tokens WHERE user_id = $1 AND app = $2 AND is_deleted = false`, userID, app)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+	defer rows.Close()
+	sessions := make([]ente.Session, 0)
+	for rows.Next() {
+		var ip sql.NullString
+		var userAgent sql.NullString
+		var session ente.Session
+		err := rows.Scan(&session.Token, &session.CreationTime, &ip,
+			&userAgent, &session.LastUsedTime)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "")
+		}
+		if ip.Valid {
+			session.IP = ip.String
+		} else {
+			session.IP = "Unknown IP"
+		}
+		if userAgent.Valid {
+			session.UA = userAgent.String
+			session.PrettyUA = network.GetPrettyUA(userAgent.String)
+		} else {
+			session.UA = "Unknown Device"
+			session.PrettyUA = "Unknown Device"
+		}
+		sessions = append(sessions, session)
+	}
+	return sessions, nil
+}
diff --git a/server/pkg/repo/userentity/data.go b/server/pkg/repo/userentity/data.go
new file mode 100644
index 000000000..86263ff24
--- /dev/null
+++ b/server/pkg/repo/userentity/data.go
@@ -0,0 +1,119 @@
+package userentity
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+	"fmt"
+
+	model "github.com/ente-io/museum/ente/userentity"
+	"github.com/ente-io/stacktrace"
+	"github.com/google/uuid"
+	"github.com/sirupsen/logrus"
+)
+
+// Create inserts a new entry
+func (r *Repository) Create(ctx context.Context, userID int64, entry model.EntityDataRequest) (uuid.UUID, error) {
+	id := uuid.New()
+	err := r.DB.QueryRow(`INSERT into entity_data(
+		id,
+		user_id,
+		type,
+		encrypted_data,
+		header) VALUES ($1,$2,$3,$4,$5) RETURNING id`,
+		id,                  // $1 id
+		userID,              // $2 user_id
+		entry.Type,          // $3 type
+		entry.EncryptedData, // $4 encrypted_data
+		entry.Header).       // $5 header
+		Scan(&id)
+	if err != nil {
+		return id, stacktrace.Propagate(err, "failed to create entity data")
+	}
+	return id, nil
+}
+
+func (r *Repository) Get(ctx context.Context, userID int64, id uuid.UUID) (*model.EntityData, error) {
+	res := model.EntityData{}
+	row := r.DB.QueryRowContext(ctx, `SELECT
+		id, user_id, type, encrypted_data, header, is_deleted, created_at, updated_at
+		FROM entity_data
+		WHERE id = $1 AND
+		user_id = $2`,
+		id,     // $1
+		userID, // $2
+	)
+	err := row.Scan(&res.ID, &res.UserID, &res.Type, &res.EncryptedData, &res.Header, &res.IsDeleted, &res.CreatedAt, &res.UpdatedAt)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "failed to get entity data")
+	}
+	return &res, nil
+}
+
+func (r *Repository) Delete(ctx context.Context, userID int64, id uuid.UUID) (bool, error) {
+	_, err := r.DB.ExecContext(ctx,
+		`UPDATE entity_data SET is_deleted = true, encrypted_data = NULL, header = NULL where id=$1 and user_id = $2`,
+		id, userID)
+	if err != nil {
+		return false, stacktrace.Propagate(err, fmt.Sprintf("failed to delete entity_data with id=%s", id))
+	}
+	return true, nil
+}
+
+func (r *Repository) Update(ctx context.Context, userID int64, req model.UpdateEntityDataRequest) error {
+	result, err := r.DB.ExecContext(ctx,
+		`UPDATE entity_data SET encrypted_data = $1, header = $2 where id=$3 and user_id = $4 and is_deleted = FALSE`,
+		req.EncryptedData, req.Header, req.ID, userID)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	affected, err := result.RowsAffected()
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	if affected != 1 {
+		return stacktrace.Propagate(errors.New("exactly one row should be updated"), "")
+	}
+	return nil
+}
+
+// GetDiff returns the entity_data entries that have been added or
+// modified after the given sinceTime
+func (r *Repository) GetDiff(ctx context.Context, userID int64, eType model.EntityType, sinceTime int64, limit int16) ([]model.EntityData, error) {
+	rows, err := r.DB.QueryContext(ctx, `SELECT
+		id, user_id, type, encrypted_data, header, is_deleted, created_at, updated_at
+		FROM entity_data
+		WHERE user_id = $1 and type = $2
+		and updated_at > $3
+		ORDER BY updated_at
+		LIMIT $4`,
+		userID,    // $1
+		eType,     // $2
+		sinceTime, // $3
+		limit,     // $4
+	)
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "GetDiff query failed")
+	}
+	return convertRowsToEntityData(rows)
+}
+
+func convertRowsToEntityData(rows *sql.Rows) ([]model.EntityData, error) {
+	defer func() {
+		if err := rows.Close(); err != nil {
+			logrus.Error(err)
+		}
+	}()
+	result := make([]model.EntityData, 0)
+	for rows.Next() {
+		entity := model.EntityData{}
+		err := rows.Scan(
+			&entity.ID, &entity.UserID, &entity.Type, &entity.EncryptedData, &entity.Header, &entity.IsDeleted,
+			&entity.CreatedAt, &entity.UpdatedAt)
+		if err != nil {
+			return nil, stacktrace.Propagate(err, "failed to scan rows in convertRowsToEntityData")
+		}
+		result = append(result, entity)
+	}
+	return result, nil
+}
diff --git a/server/pkg/repo/userentity/key.go b/server/pkg/repo/userentity/key.go
new file mode 100644
index 000000000..18eccd14c
--- /dev/null
+++ b/server/pkg/repo/userentity/key.go
@@ -0,0 +1,30 @@
+package userentity
+
+import (
+	"context"
+
+	model "github.com/ente-io/museum/ente/userentity"
+	"github.com/ente-io/stacktrace"
+)
+
+func (r *Repository) CreateKey(ctx context.Context, userID int64, entry model.EntityKeyRequest) error {
+	_, err := r.DB.ExecContext(ctx, `INSERT into entity_key(
+		user_id, type, encrypted_key, header) VALUES ($1,$2,$3, $4)`,
+		userID, entry.Type, entry.EncryptedKey, entry.Header)
+
+	if err != nil {
+		return stacktrace.Propagate(err, "failed to create entity key")
+	}
+	return nil
+}
+
+func (r *Repository) GetKey(ctx context.Context, userID int64, eType model.EntityType) (model.EntityKey, error) {
+	row := r.DB.QueryRowContext(ctx, `SELECT user_id, type, encrypted_key, header,
+		created_at from entity_key where user_id = $1 and type = $2`, userID, eType)
+	var result model.EntityKey
+	err := row.Scan(&result.UserID, &result.Type, &result.EncryptedKey, &result.Header, &result.CreatedAt)
+	if err != nil {
+		return result, stacktrace.Propagate(err, "failed to get entity key")
+	}
+	return result, nil
+}
diff --git a/server/pkg/repo/userentity/repository.go b/server/pkg/repo/userentity/repository.go
new file mode 100644
index 000000000..5dc9ba9db
--- /dev/null
+++ b/server/pkg/repo/userentity/repository.go
@@ -0,0 +1,11 @@
+package userentity
+
+import (
+	"database/sql"
+)
+
+// Repository defines the methods for inserting, updating and retrieving
+// userentity related keys and entities from the underlying repository
+type Repository struct {
+	DB *sql.DB
+}
diff --git a/server/pkg/utils/array/array.go b/server/pkg/utils/array/array.go
new file mode 100644
index 000000000..98869ad5d
--- /dev/null
+++ b/server/pkg/utils/array/array.go
@@ -0,0 +1,49 @@
+package array
+
+// UniqueInt64 returns the unique elements in the input array
+func UniqueInt64(input []int64) []int64 {
+	visited := map[int64]bool{}
+	var result []int64
+	for _, value := range input {
+		// append the value to the result only on its first visit
+		if !visited[value] {
+			visited[value] = true
+			result = append(result, value)
+		}
+	}
+	return result
+}
+
+// ContainsDuplicateInInt64Array returns true if the array contains duplicate elements.
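+//
+// For example: ContainsDuplicateInInt64Array([]int64{1, 2, 2}) is true, while
+// ContainsDuplicateInInt64Array([]int64{1, 2, 3}) is false.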
+func ContainsDuplicateInInt64Array(input []int64) bool {
+	visited := map[int64]bool{}
+	for _, value := range input {
+		if visited[value] {
+			return true
+		}
+		visited[value] = true
+	}
+	return false
+}
+
+// StringInList returns true if the given string is present in the list
+func StringInList(a string, list []string) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
+
+// Int64InList returns true if the given int64 is present in the list
+func Int64InList(a int64, list []int64) bool {
+	for _, b := range list {
+		if b == a {
+			return true
+		}
+	}
+	return false
+}
diff --git a/server/pkg/utils/auth/auth.go b/server/pkg/utils/auth/auth.go
new file mode 100644
index 000000000..6f8091998
--- /dev/null
+++ b/server/pkg/utils/auth/auth.go
@@ -0,0 +1,137 @@
+package auth
+
+import (
+	"crypto/rand"
+	"encoding/base64"
+	"math/big"
+	"net/http"
+	"strconv"
+	"strings"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/ente/cast"
+	"github.com/ente-io/stacktrace"
+
+	"github.com/gin-gonic/gin"
+	"golang.org/x/crypto/bcrypt"
+)
+
+const (
+	PublicAccessKey = "X-Public-Access-ID"
+	CastContext     = "X-Cast-Context"
+)
+
+// GenerateRandomBytes returns securely generated random bytes.
+// It will return an error if the system's secure random
+// number generator fails to function correctly, in which
+// case the caller should not continue.
+func GenerateRandomBytes(n int) ([]byte, error) {
+	b := make([]byte, n)
+	_, err := rand.Read(b)
+	// Note that err == nil only if we read len(b) bytes.
+	if err != nil {
+		return nil, stacktrace.Propagate(err, "")
+	}
+
+	return b, nil
+}
+
+// GenerateRandomInt returns a securely generated random integer in [0, n).
+//
+// It will return an error if the system's secure random number generator fails
+// to function correctly, in which case the caller should not continue.
+func GenerateRandomInt(n int64) (int64, error) {
+	r, err := rand.Int(rand.Reader, big.NewInt(n))
+	if err != nil {
+		return 0, stacktrace.Propagate(err, "")
+	}
+	return r.Int64(), nil
+}
+
+// GenerateURLSafeRandomString returns a URL-safe, base64 encoded
+// securely generated random string.
+// It will return an error if the system's secure random
+// number generator fails to function correctly, in which
+// case the caller should not continue.
+func GenerateURLSafeRandomString(s int) (string, error) {
+	b, err := GenerateRandomBytes(s)
+	return base64.URLEncoding.EncodeToString(b), stacktrace.Propagate(err, "")
+}
+
+// GetHashedPassword returns the hash of the specified password
+func GetHashedPassword(password string) (string, error) {
+	saltedBytes := []byte(password)
+	hashedBytes, err := bcrypt.GenerateFromPassword(saltedBytes, bcrypt.DefaultCost)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+
+	hash := string(hashedBytes[:])
+	return hash, nil
+}
+
+// CompareHashes compares a bcrypt hashed password with its possible plaintext
+// equivalent. Returns nil on success, or an error on failure.
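+//
+// A minimal round-trip sketch (the password literal is an assumption):
+//
+//	hash, err := GetHashedPassword("correct horse battery staple")
+//	if err == nil {
+//		err = CompareHashes(hash, "correct horse battery staple") // nil on match
+//	}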
+func CompareHashes(hash string, s string) error {
+	existing := []byte(hash)
+	incoming := []byte(s)
+	return bcrypt.CompareHashAndPassword(existing, incoming)
+}
+
+// GetUserID fetches the userID embedded in a request header
+func GetUserID(header http.Header) int64 {
+	userID, _ := strconv.ParseInt(header.Get("X-Auth-User-ID"), 10, 64)
+	return userID
+}
+
+func GetApp(c *gin.Context) ente.App {
+	if strings.HasPrefix(c.GetHeader("X-Client-Package"), "io.ente.auth") {
+		return ente.Auth
+	}
+
+	if strings.HasPrefix(c.GetHeader("X-Client-Package"), "io.ente.locker") {
+		return ente.Locker
+	}
+
+	return ente.Photos
+}
+
+func GetToken(c *gin.Context) string {
+	token := c.GetHeader("X-Auth-Token")
+	if token == "" {
+		token = c.Query("token")
+	}
+	return token
+}
+
+func GetAccessToken(c *gin.Context) string {
+	token := c.GetHeader("X-Auth-Access-Token")
+	if token == "" {
+		token = c.Query("accessToken")
+	}
+	return token
+}
+
+func GetCastToken(c *gin.Context) string {
+	token := c.GetHeader("X-Cast-Access-Token")
+	if token == "" {
+		token = c.Query("castToken")
+	}
+	return token
+}
+
+func GetAccessTokenJWT(c *gin.Context) string {
+	token := c.GetHeader("X-Auth-Access-Token-JWT")
+	if token == "" {
+		token = c.Query("accessTokenJWT")
+	}
+	return token
+}
+
+func MustGetPublicAccessContext(c *gin.Context) ente.PublicAccessContext {
+	return c.MustGet(PublicAccessKey).(ente.PublicAccessContext)
+}
+
+func GetCastCtx(c *gin.Context) cast.AuthContext {
+	return c.MustGet(CastContext).(cast.AuthContext)
+}
diff --git a/server/pkg/utils/billing/billing.go b/server/pkg/utils/billing/billing.go
new file mode 100644
index 000000000..7301f55bf
--- /dev/null
+++ b/server/pkg/utils/billing/billing.go
@@ -0,0 +1,156 @@
+package billing
+
+import (
+	"encoding/json"
+	"os"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/config"
+	"github.com/ente-io/museum/pkg/utils/time"
+	"github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+	"github.com/stripe/stripe-go/v72/client"
+)
+
+var ProviderToExpiryGracePeriodMap = map[ente.PaymentProvider]int64{
+	ente.AppStore:  time.MicroSecondsInOneHour * 120, // 5 days
+	ente.Paypal:    time.MicroSecondsInOneHour * 120,
+	ente.PlayStore: time.MicroSecondsInOneHour * 120,
+	ente.Stripe:    time.MicroSecondsInOneHour * 120,
+}
+
+var CountriesInEU = []string{
+	"AT",
+	"BE",
+	"BG",
+	"CY",
+	"CZ",
+	"DE",
+	"DK",
+	"EE",
+	"ES",
+	"FI",
+	"FR",
+	"GR",
+	"HR",
+	"HU",
+	"IE",
+	"IT",
+	"LT",
+	"LU",
+	"LV",
+	"MT",
+	"NL",
+	"PL",
+	"PT",
+	"RO",
+	"SE",
+	"SI",
+	"SK",
+}
+
+// GetPlans returns the current billing plans
+func GetPlans() ente.BillingPlansPerAccount {
+	var plans = make(ente.BillingPlansPerAccount)
+	plans[ente.StripeIN] = getPlansIN()
+	plans[ente.StripeUS] = getPlansUS()
+	return plans
+}
+
+// GetStripeClients returns stripe clients for all accounts
+func GetStripeClients() ente.StripeClientPerAccount {
+	stripeClients := make(ente.StripeClientPerAccount)
+	stripeClients[ente.StripeIN] = getStripeClient(viper.GetString("stripe.in.key"))
+	stripeClients[ente.StripeUS] = getStripeClient(viper.GetString("stripe.us.key"))
+	return stripeClients
+}
+
+func getPlansUS() ente.BillingPlansPerCountry {
+	fileName := "us.json"
+	if config.IsLocalEnvironment() {
+		fileName = "us-testing.json"
+	}
+	return parsePricingFile(fileName)
+}
+
+func getPlansIN() ente.BillingPlansPerCountry {
+	fileName := "in.json"
+	if config.IsLocalEnvironment() {
+		fileName = "in-testing.json"
+	}
+	return parsePricingFile(fileName)
+}
+
+func parsePricingFile(fileName string) ente.BillingPlansPerCountry {
+	filePath, err := config.BillingConfigFilePath(fileName)
+	if err != nil {
+		logrus.Fatalf("Error getting billing config file: %v", err)
+	}
+	data, err := os.ReadFile(filePath)
+	if err != nil {
+		logrus.Errorf("Error reading file %s: %v\n", filePath, err)
+		return nil
+	}
+
+	var plansPerCountry ente.BillingPlansPerCountry
+	err = json.Unmarshal(data, &plansPerCountry)
+	if err != nil {
+		logrus.Errorf("Error un-marshalling JSON: %v\n", err)
+		return nil
+	}
+	return plansPerCountry
+}
+
+// GetFreeSubscription returns a free subscription for a newly signed-up user
+func GetFreeSubscription(userID int64) ente.Subscription {
+	return ente.Subscription{
+		UserID:                userID,
+		ProductID:             ente.FreePlanProductID,
+		OriginalTransactionID: ente.FreePlanTransactionID,
+		Storage:               ente.FreePlanStorage,
+		ExpiryTime:            time.NDaysFromNow(ente.TrialPeriodDuration),
+	}
+}
+
+func GetFreePlan() ente.FreePlan {
+	return ente.FreePlan{
+		Storage:  ente.FreePlanStorage,
+		Period:   ente.PeriodYear,
+		Duration: ente.TrialPeriodDuration,
+	}
+}
+
+func GetActivePlanIDs() []string {
+	return []string{
+		"50gb_monthly",
+		"200gb_monthly",
+		"500gb_monthly",
+		"2000gb_monthly",
+		"50gb_yearly",
+		"200gb_yearly",
+		"500gb_yearly",
+		"2000gb_yearly",
+	}
+}
+
+func IsActivePaidPlan(subscription ente.Subscription) bool {
+	return subscription.ProductID != ente.FreePlanProductID && subscription.ExpiryTime > time.Microseconds()
+}
+
+func GetDefaultPlans(plans ente.BillingPlansPerAccount) ente.BillingPlansPerCountry {
+	if ente.DefaultStripeAccountCountry == ente.StripeIN {
+		return plans[ente.StripeIN]
+	} else {
+		return plans[ente.StripeUS]
+	}
+}
+
+func GetDefaultPlanCountry() string {
+	return "US"
+}
+
+func getStripeClient(apiKey string) *client.API {
+	stripeClient := &client.API{}
+	stripeClient.Init(apiKey, nil)
+	return stripeClient
+}
diff --git a/server/pkg/utils/byteMarshaller/b64.go b/server/pkg/utils/byteMarshaller/b64.go
new file mode 100644
index 000000000..61aa0167b
--- /dev/null
+++ b/server/pkg/utils/byteMarshaller/b64.go
@@ -0,0 +1,30 @@
+package byteMarshaller
+
+import (
+	"encoding/base64"
+	"strings"
+)
+
+// EncodeSlices encodes a [][]byte into a single string.
+func EncodeSlices(slices [][]byte) string {
+	var strSlices []string
+	for _, slice := range slices {
+		strSlices = append(strSlices, base64.StdEncoding.EncodeToString(slice))
+	}
+	// Join the encoded strings with a comma, which is not in the base64 alphabet.
+	return strings.Join(strSlices, ",")
+}
+
+// DecodeString decodes a string back into a [][]byte.
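+//
+// Round-trip sketch (the byte values are illustrative):
+//
+//	encoded := EncodeSlices([][]byte{{1, 2}, {3, 4}})
+//	decoded, err := DecodeString(encoded) // [][]byte{{1, 2}, {3, 4}}, nil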
+func DecodeString(encoded string) ([][]byte, error) {
+	strSlices := strings.Split(encoded, ",")
+	var byteSlices [][]byte
+	for _, str := range strSlices {
+		slice, err := base64.StdEncoding.DecodeString(str)
+		if err != nil {
+			return nil, err
+		}
+		byteSlices = append(byteSlices, slice)
+	}
+	return byteSlices, nil
+}
diff --git a/server/pkg/utils/byteMarshaller/byte.go b/server/pkg/utils/byteMarshaller/byte.go
new file mode 100644
index 000000000..25bfabecd
--- /dev/null
+++ b/server/pkg/utils/byteMarshaller/byte.go
@@ -0,0 +1,28 @@
+package byteMarshaller
+
+import (
+	"bytes"
+	"encoding/binary"
+)
+
+func ConvertInt64ToByte(i int64) (b []byte, err error) {
+	buf := new(bytes.Buffer)
+	err = binary.Write(buf, binary.BigEndian, i)
+	if err != nil {
+		return
+	}
+
+	b = buf.Bytes()
+
+	return
+}
+
+func ConvertBytesToInt64(b []byte) (int64, error) {
+	buf := bytes.NewReader(b)
+	var num int64
+	err := binary.Read(buf, binary.BigEndian, &num)
+	if err != nil {
+		return 0, err
+	}
+	return num, nil
+}
diff --git a/server/pkg/utils/config/config.go b/server/pkg/utils/config/config.go
new file mode 100644
index 000000000..ed0bbb6e3
--- /dev/null
+++ b/server/pkg/utils/config/config.go
@@ -0,0 +1,167 @@
+// The config package contains functions for configuring Viper.
+//
+// # Configuration
+//
+// We use the Viper package to read in configuration from YAML files. In
+// addition, we also read in values from the OS environment. These values
+// override the ones in the config files.
+//
+// The names of the OS environment variables should be
+//
+//   - prefixed with 'ENTE_'
+//
+//   - uppercased versions of the config file variable names
+//
+//   - for nested config variables, dots should be replaced with '_'.
+//
+// For example, the environment variable corresponding to
+//
+//	foo:
+//	    bar-baz: quux
+//
+// would be `ENTE_FOO_BAR-BAZ`.
+package config
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"strings"
+
+	"github.com/ente-io/stacktrace"
+	"github.com/spf13/viper"
+)
+
+func ConfigureViper(environment string) error {
+	// Ask Viper to read in values from the environment. These values will
+	// override the values specified in the config files.
+	viper.AutomaticEnv()
+	// Set the prefix for the environment variables that Viper will look for.
+	viper.SetEnvPrefix("ENTE")
+	// Ask Viper to look for underscores (instead of dots) for nested configs.
+	viper.SetEnvKeyReplacer(strings.NewReplacer(`.`, `_`))
+
+	viper.SetConfigFile("configurations/" + environment + ".yaml")
+	err := viper.ReadInConfig()
+	if err != nil {
+		return err
+	}
+
+	err = mergeConfigFileIfExists("museum.yaml")
+	if err != nil {
+		return err
+	}
+
+	credentialsFile := viper.GetString("credentials-file")
+	if credentialsFile == "" {
+		credentialsFile = "credentials.yaml"
+	}
+	err = mergeConfigFileIfExists(credentialsFile)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func mergeConfigFileIfExists(configFile string) error {
+	configFileExists, err := doesFileExist(configFile)
+	if err != nil {
+		return err
+	}
+	if configFileExists {
+		viper.SetConfigFile(configFile)
+		err = viper.MergeInConfig()
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func doesFileExist(path string) (bool, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		if errors.Is(err, os.ErrNotExist) {
+			return false, nil
+		}
+		return false, err
+	}
+	if info == nil {
+		return false, nil
+	}
+	// Return false if the stat entry exists, but is a directory.
+	//
+	// This allows us to ignore the default museum.yaml directory that gets
+	// mounted on a fresh checkout.
+	if info.IsDir() {
+		return false, nil
+	}
+	return true, nil
+}
+
+func GetPGInfo() string {
+	return fmt.Sprintf("host=%s port=%d user=%s "+
+		"password=%s dbname=%s sslmode=disable",
+		viper.GetString("db.host"),
+		viper.GetInt("db.port"),
+		viper.GetString("db.user"),
+		viper.GetString("db.password"),
+		viper.GetString("db.name"))
+}
+
+func IsLocalEnvironment() bool {
+	env := os.Getenv("ENVIRONMENT")
+	return env == "" || env == "local"
+}
+
+// CredentialFilePath returns the path to an existing file in the credentials
+// directory.
+//
+// This file must exist if we're running in a non-local configuration.
+//
+// By default, it searches in the credentials/ directory, but that can be
+// customized using the "credentials-dir" config option.
+func CredentialFilePath(name string) (string, error) {
+	credentialsDir := viper.GetString("credentials-dir")
+	if credentialsDir == "" {
+		credentialsDir = "credentials"
+	}
+
+	path := credentialsDir + "/" + name
+	return productionFilePath(path)
+}
+
+// BillingConfigFilePath returns the path to an existing file in the
+// billing directory.
+//
+// This file must exist if we're running in a non-local configuration.
+//
+// By default, it searches in the data/billing directory, but that can be
+// customized using the "billing-config-dir" config option.
+func BillingConfigFilePath(name string) (string, error) {
+	billingConfigDir := viper.GetString("billing-config-dir")
+	if billingConfigDir == "" {
+		billingConfigDir = "data/billing/"
+	}
+
+	path := billingConfigDir + name
+	return productionFilePath(path)
+}
+
+func productionFilePath(path string) (string, error) {
+	pathExists, err := doesFileExist(path)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	if pathExists {
+		return path, nil
+	}
+	// The path must exist if we're running in production (or more precisely, in
+	// any non-local environment).
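+	//
+	// For example (assumed behavior, per the checks in this function): a
+	// missing credentials file resolves to ("", nil) when running locally,
+	// but to an error in any non-local environment, failing fast at startup.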
+	if IsLocalEnvironment() {
+		return "", nil
+	}
+	return "", fmt.Errorf("required file not found at %s", path)
+}
diff --git a/server/pkg/utils/crypto/crypto.go b/server/pkg/utils/crypto/crypto.go
new file mode 100644
index 000000000..cb0f9fc30
--- /dev/null
+++ b/server/pkg/utils/crypto/crypto.go
@@ -0,0 +1,57 @@
+package crypto
+
+import (
+	"github.com/ente-io/stacktrace"
+
+	"encoding/base64"
+
+	"github.com/GoKillers/libsodium-go/cryptobox"
+	generichash "github.com/GoKillers/libsodium-go/cryptogenerichash"
+	cryptosecretbox "github.com/GoKillers/libsodium-go/cryptosecretbox"
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/auth"
+)
+
+func Encrypt(data string, encryptionKey []byte) (ente.EncryptionResult, error) {
+	nonce, err := auth.GenerateRandomBytes(cryptosecretbox.CryptoSecretBoxNonceBytes())
+	if err != nil {
+		return ente.EncryptionResult{}, stacktrace.Propagate(err, "")
+	}
+	encryptedEmailBytes, errCode := cryptosecretbox.CryptoSecretBoxEasy([]byte(data), nonce, encryptionKey)
+	if errCode != 0 {
+		return ente.EncryptionResult{}, stacktrace.NewError("encryption failed")
+	}
+	return ente.EncryptionResult{Cipher: encryptedEmailBytes, Nonce: nonce}, nil
+}
+
+func Decrypt(cipher []byte, key []byte, nonce []byte) (string, error) {
+	decryptedBytes, errCode := cryptosecretbox.CryptoSecretBoxOpenEasy(cipher, nonce, key)
+	if errCode != 0 {
+		return "", stacktrace.NewError("email decryption failed")
+	}
+	return string(decryptedBytes), nil
+}
+
+func GetHash(data string, hashKey []byte) (string, error) {
+	dataHashBytes, errCode := generichash.CryptoGenericHash(generichash.CryptoGenericHashBytes(), []byte(data), hashKey)
+	if errCode != 0 {
+		return "", stacktrace.NewError("email hash failed")
+	}
+	return base64.StdEncoding.EncodeToString(dataHashBytes), nil
+}
+
+func GetEncryptedToken(token string, publicKey string) (string, error) {
+	publicKeyBytes, err := base64.StdEncoding.DecodeString(publicKey)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	tokenBytes, err := base64.URLEncoding.DecodeString(token)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	encryptedTokenBytes, errCode := cryptobox.CryptoBoxSeal(tokenBytes, publicKeyBytes)
+	if errCode != 0 {
+		return "", stacktrace.NewError("token encryption failed")
+	}
+	return base64.StdEncoding.EncodeToString(encryptedTokenBytes), nil
+}
diff --git a/server/pkg/utils/email/email.go b/server/pkg/utils/email/email.go
new file mode 100644
index 000000000..89993882c
--- /dev/null
+++ b/server/pkg/utils/email/email.go
@@ -0,0 +1,99 @@
+// The email package contains functions for directly sending emails.
+//
+// These functions can be used for directly sending emails to given email
+// addresses. This is used for transactional emails, for example OTP requests.
+// Currently, we use Zoho Transmail to send out the actual mail.
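+//
+// A sketch of sending a templated mail (ott_auth.html is one of the bundled
+// mail-templates; the addresses and template data here are assumptions):
+//
+//	err := SendTemplatedEmail([]string{"user@example.org"}, "Ente", "verification@ente.io",
+//		"Verification code", "ott_auth.html",
+//		map[string]interface{}{"VerificationCode": "123456"}, nil)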
+package email
+
+import (
+	"bytes"
+	"encoding/json"
+	"html/template"
+	"net/http"
+	"strings"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/stacktrace"
+	log "github.com/sirupsen/logrus"
+	"github.com/spf13/viper"
+)
+
+// Send sends an email
+func Send(toEmails []string, fromName string, fromEmail string, subject string, htmlBody string, inlineImages []map[string]interface{}) error {
+	if len(toEmails) == 0 {
+		return ente.ErrBadRequest
+	}
+
+	authKey := viper.GetString("transmail.key")
+	silent := viper.GetBool("internal.silent")
+	if authKey == "" || silent {
+		log.Infof("Skipping sending email to %s: %s", toEmails[0], subject)
+		return nil
+	}
+
+	var to []ente.ToEmailAddress
+	for _, toEmail := range toEmails {
+		to = append(to, ente.ToEmailAddress{EmailAddress: ente.EmailAddress{Address: toEmail}})
+	}
+	mail := &ente.Mail{
+		BounceAddress: ente.TransmailEndBounceAddress,
+		From:          ente.EmailAddress{Address: fromEmail, Name: fromName},
+		Subject:       subject,
+		Htmlbody:      htmlBody,
+		InlineImages:  inlineImages,
+	}
+	if len(toEmails) == 1 {
+		mail.To = to
+	} else {
+		mail.Bcc = to
+	}
+	postBody, err := json.Marshal(mail)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	reqBody := bytes.NewBuffer(postBody)
+	client := &http.Client{}
+	req, err := http.NewRequest("POST", ente.TransmailEndPoint, reqBody)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	req.Header.Set("accept", "application/json")
+	req.Header.Set("content-type", "application/json")
+	req.Header.Set("authorization", authKey)
+	response, err := client.Do(req)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	// Close the response body so that the underlying connection can be reused.
+	response.Body.Close()
+	return nil
+}
+
+func SendTemplatedEmail(to []string, fromName string, fromEmail string, subject string, templateName string, templateData map[string]interface{}, inlineImages []map[string]interface{}) error {
+	body, err := getMailBody(templateName, templateData)
+	if err != nil {
+		return stacktrace.Propagate(err, "")
+	}
+	return Send(to, fromName, fromEmail, subject, body, inlineImages)
+}
+
+func GetMaskedEmail(email string) string {
+	at := strings.LastIndex(email, "@")
+	if at >= 0 {
+		username, domain := email[:at], email[at+1:]
+		maskedUsername := strings.Repeat("*", len(username))
+		return maskedUsername + "@" + domain
+	} else {
+		// Should ideally never happen, there should always be an @ symbol
+		return "[invalid_email]"
+	}
+}
+
+// getMailBody generates the mail html body from the provided template and data
+func getMailBody(templateName string, templateData map[string]interface{}) (string, error) {
+	htmlbody := new(bytes.Buffer)
+	t := template.Must(template.New(templateName).ParseFiles("mail-templates/" + templateName))
+	err := t.Execute(htmlbody, templateData)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return htmlbody.String(), nil
+}
diff --git a/server/pkg/utils/file/file.go b/server/pkg/utils/file/file.go
new file mode 100644
index 000000000..db9434702
--- /dev/null
+++ b/server/pkg/utils/file/file.go
@@ -0,0 +1,40 @@
+package file
+
+import (
+	"fmt"
+	"os"
+	"syscall"
+
+	"github.com/ente-io/stacktrace"
+)
+
+func MakeDirectoryIfNotExists(path string) error {
+	if _, err := os.Stat(path); os.IsNotExist(err) {
+		return os.MkdirAll(path, os.ModeDir|0755)
+	}
+	return nil
+}
+
+func DeleteAllFilesInDirectory(path string) error {
+	_, err := os.Stat(path)
+	if err != nil {
+		// An error from os.Stat means the file path does not exist
+		return nil
+	}
+	err = os.RemoveAll(path)
+	return stacktrace.Propagate(err, "")
+}
+
+// FreeSpace returns the free space in bytes on the disk where path is mounted.
+func FreeSpace(path string) (uint64, error) {
+	var fs syscall.Statfs_t
+	err := syscall.Statfs(path, &fs)
+	if err != nil {
+		return 0, err
+	}
+	return fs.Bfree * uint64(fs.Bsize), nil
+}
+
+func GetLockNameForObject(objectKey string) string {
+	return fmt.Sprintf("Object:%s", objectKey)
+}
diff --git a/server/pkg/utils/handler/handler.go b/server/pkg/utils/handler/handler.go
new file mode 100644
index 000000000..6b229c202
--- /dev/null
+++ b/server/pkg/utils/handler/handler.go
@@ -0,0 +1,106 @@
+package handler
+
+import (
+	"database/sql"
+	"errors"
+	"io"
+	"net/http"
+	"syscall"
+
+	"github.com/ente-io/museum/ente"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/stacktrace"
+	"github.com/gin-contrib/requestid"
+	"github.com/gin-gonic/gin"
+	"github.com/go-playground/validator/v10"
+	log "github.com/sirupsen/logrus"
+)
+
+// Error parses the error, translates it into an HTTP response and aborts
+// the request
+func Error(c *gin.Context, err error) {
+	contextLogger := log.WithError(err).
+		WithFields(log.Fields{
+			"req_id":  requestid.Get(c),
+			"user_id": auth.GetUserID(c.Request.Header),
+		})
+	isClientError := false
+	// Tip: To trigger the "unexpected EOF" error, connect with:
+	//
+	//	echo "GET /ping HTTP/1.0\r\nContent-Length: 300\r\n\r\n" | nc localhost 8080
+	if errors.Is(err, ente.ErrStorageLimitExceeded) ||
+		errors.Is(err, ente.ErrNoActiveSubscription) ||
+		errors.Is(err, io.ErrUnexpectedEOF) ||
+		errors.Is(err, syscall.EPIPE) ||
+		errors.Is(err, syscall.ECONNRESET) {
+		isClientError = true
+	}
+	unWrappedErr := errors.Unwrap(err)
+	enteApiErr, isEnteApiErr := unWrappedErr.(*ente.ApiError)
+	if isEnteApiErr && enteApiErr.HttpStatusCode >= 400 && enteApiErr.HttpStatusCode < 500 {
+		isClientError = true
+	}
+	if isClientError {
+		contextLogger.Warn("Request failed")
+	} else {
+		contextLogger.Error("Request failed")
+	}
+	if isEnteApiErr {
+		c.AbortWithStatusJSON(enteApiErr.HttpStatusCode, enteApiErr)
+	} else if httpStatus := httpStatusCode(err); httpStatus != 0 {
+		c.AbortWithStatus(httpStatus)
+	} else {
+		if _, ok := stacktrace.RootCause(err).(validator.ValidationErrors); ok {
+			c.AbortWithStatus(http.StatusBadRequest)
+		} else if isClientError {
+			c.AbortWithStatus(http.StatusBadRequest)
+		} else {
+			c.AbortWithStatus(http.StatusInternalServerError)
+		}
+	}
+}
+
+// If `err` directly maps to an HTTP status code, return the HTTP status code.
+// Otherwise return 0.
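+//
+// For example, httpStatusCode(ente.ErrNotFound) maps to http.StatusNotFound,
+// while an error with no direct mapping yields 0 and falls through to the
+// generic handling in Error above.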
+func httpStatusCode(err error) int {
+	switch {
+	case errors.Is(err, ente.ErrNotFound) ||
+		errors.Is(err, sql.ErrNoRows):
+		return http.StatusNotFound
+	case errors.Is(err, ente.ErrBadRequest) ||
+		errors.Is(err, ente.ErrCannotDowngrade) ||
+		errors.Is(err, ente.ErrCannotSwitchPaymentProvider):
+		return http.StatusBadRequest
+	case errors.Is(err, ente.ErrTooManyBadRequest):
+		return http.StatusTooManyRequests
+	case errors.Is(err, ente.ErrPermissionDenied):
+		return http.StatusForbidden
+	case errors.Is(err, ente.ErrIncorrectOTT) ||
+		errors.Is(err, ente.ErrIncorrectTOTP) ||
+		errors.Is(err, ente.ErrInvalidPassword) ||
+		errors.Is(err, ente.ErrAuthenticationRequired):
+		return http.StatusUnauthorized
+	case errors.Is(err, ente.ErrExpiredOTT):
+		return http.StatusGone
+	case errors.Is(err, ente.ErrNoActiveSubscription) ||
+		errors.Is(err, ente.ErrSharingDisabledForFreeAccounts):
+		return http.StatusPaymentRequired
+	case errors.Is(err, ente.ErrStorageLimitExceeded):
+		return http.StatusUpgradeRequired
+	case errors.Is(err, ente.ErrFileTooLarge):
+		return http.StatusRequestEntityTooLarge
+	case errors.Is(err, ente.ErrVersionMismatch) ||
+		errors.Is(err, ente.ErrCanNotInviteUserWithPaidPlan):
+		return http.StatusConflict
+	case errors.Is(err, ente.ErrBatchSizeTooLarge):
+		return http.StatusRequestEntityTooLarge
+	case errors.Is(err, ente.ErrCanNotInviteUserAlreadyInFamily):
+		return http.StatusNotAcceptable
+	case errors.Is(err, ente.ErrFamilySizeLimitReached):
+		return http.StatusPreconditionFailed
+	case errors.Is(err, ente.ErrNotImplemented):
+		return http.StatusNotImplemented
+	default:
+		return 0
+	}
+}
diff --git a/server/pkg/utils/network/network.go b/server/pkg/utils/network/network.go
new file mode 100644
index 000000000..6ca6de49a
--- /dev/null
+++ b/server/pkg/utils/network/network.go
@@ -0,0 +1,36 @@
+package network
+
+import (
+	"github.com/gin-gonic/gin"
+	"github.com/ua-parser/uap-go/uaparser"
+)
+
+func GetClientIP(c *gin.Context) string {
+	ip := c.GetHeader("CF-Connecting-IP")
+	if ip == "" {
+		ip = c.ClientIP()
+	}
+	return ip
+}
+
+func IsCFWorkerIP(ip string) bool {
+	return ip == "2a06:98c0:3600::103"
+}
+
+func GetClientCountry(c *gin.Context) string {
+	return c.GetHeader("CF-IPCountry")
+}
+
+var parser = uaparser.NewFromSaved()
+
+func GetPrettyUA(ua string) string {
+	parsedUA := parser.Parse(ua)
+	if parsedUA.UserAgent.Family == "Android" {
+		return parsedUA.Device.Model + ", " + parsedUA.Os.ToString()
+	} else if parsedUA.UserAgent.Family == "CFNetwork" {
+		return parsedUA.Device.ToString()
+	} else if parsedUA.UserAgent.Family == "Electron" {
+		return "Desktop App" + ", " + parsedUA.Os.ToString()
+	}
+	return parsedUA.UserAgent.Family + ", " + parsedUA.Os.ToString()
+}
diff --git a/server/pkg/utils/random/generate.go b/server/pkg/utils/random/generate.go
new file mode 100644
index 000000000..47932b660
--- /dev/null
+++ b/server/pkg/utils/random/generate.go
@@ -0,0 +1,15 @@
+package random
+
+import (
+	"fmt"
+	"github.com/ente-io/museum/pkg/utils/auth"
+	"github.com/ente-io/stacktrace"
+)
+
+func GenerateSixDigitOtp() (string, error) {
+	n, err := auth.GenerateRandomInt(1_000_000)
+	if err != nil {
+		return "", stacktrace.Propagate(err, "")
+	}
+	return fmt.Sprintf("%06d", n), nil
+}
diff --git a/server/pkg/utils/recover/wrap.go b/server/pkg/utils/recover/wrap.go
new file mode 100644
index 000000000..cbae2e734
--- /dev/null
+++ b/server/pkg/utils/recover/wrap.go
@@ -0,0 +1,30 @@
+package recover
+
+import (
+	"fmt"
+
"github.com/ente-io/stacktrace" +) + +type Int64ToInt64DataFn func(userID int64) (int64, error) + +// Int64ToInt64RecoverWrapper is a helper method to wrap a function of Int64ToInt64DataFn syntax with recover. +// This wrapper helps us in avoiding boilerplate code for panic recovery while invoking the input fn in a new goroutine +func Int64ToInt64RecoverWrapper( + input int64, + fn Int64ToInt64DataFn, + output *int64, +) (err error) { + defer func() { + if x := recover(); x != nil { + // https://stackoverflow.com/questions/33167282/how-to-return-a-value-in-a-go-function-that-panics/33167433#33167433 + // we need to use named params if we want to return panic as err + err = stacktrace.Propagate(fmt.Errorf("%+v", x), "panic during GoInt64ToInt64Data") + } + }() + resp, err := fn(input) + if err == nil { + *output = resp + } + return err +} diff --git a/server/pkg/utils/recover/wrap_test.go b/server/pkg/utils/recover/wrap_test.go new file mode 100644 index 000000000..80585436e --- /dev/null +++ b/server/pkg/utils/recover/wrap_test.go @@ -0,0 +1,52 @@ +package recover + +import ( + "errors" + "testing" +) + +func TestInt64ToInt64RecoverWrapper(t *testing.T) { + + type args struct { + input int64 + fn Int64ToInt64DataFn + output *int64 + expectedOutput int64 + } + var expectedResult int64 + + tests := []struct { + name string + args args + wantErr bool + }{ + { + "success", + args{input: 1, fn: func(userID int64) (int64, error) { return 5, nil }, output: &expectedResult, expectedOutput: 5}, + false, + }, + { + "err", + args{input: 1, fn: func(userID int64) (int64, error) { return 0, errors.New("testErr") }, output: nil, expectedOutput: 0}, + true, + }, + { + "panic_err", + args{input: 1, fn: func(userID int64) (int64, error) { panic("panic err") }, output: nil, expectedOutput: 0}, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := Int64ToInt64RecoverWrapper(tt.args.input, tt.args.fn, tt.args.output) + if (err != nil) != tt.wantErr { + t.Errorf("Int64ToInt64RecoverWrapper() error = %v, wantErr %v", err, tt.wantErr) + } + if err == nil { + if *tt.args.output != tt.args.expectedOutput { + t.Errorf("Int64ToInt64RecoverWrapper() output = %v, expectedOutput %v", *tt.args.output, tt.args.expectedOutput) + } + } + }) + } +} diff --git a/server/pkg/utils/s3config/s3config.go b/server/pkg/utils/s3config/s3config.go new file mode 100644 index 000000000..fe83ce6ea --- /dev/null +++ b/server/pkg/utils/s3config/s3config.go @@ -0,0 +1,207 @@ +package s3config + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + "github.com/aws/aws-sdk-go/service/s3" + log "github.com/sirupsen/logrus" + "github.com/spf13/viper" + + "github.com/ente-io/museum/pkg/utils/array" +) + +// S3Config is the file which abstracts away s3 related configs for clients. +// +// Objects are replicated in multiple "data centers". Each data center is an +// S3-compatible provider, and has an associated "bucket". +// +// The list of data centers are not arbitrarily configurable - while we pick the +// exact credentials, endpoint and the bucket from the runtime configuration +// file, the code has specific logic to deal with the quirks of specific data +// centers. So as such, the constants used to specify these data centers in the +// YAML configuration matter. +type S3Config struct { + // A map from data centers to the name of the bucket used in that DC. 
+	buckets map[string]string
+	// Primary (hot) data center
+	hotDC string
+	// Secondary (hot) data center
+	secondaryHotDC string
+	// A map from data centers to S3 configurations
+	s3Configs map[string]*aws.Config
+	// A map from data centers to pre-created S3 clients
+	s3Clients map[string]s3.S3
+	// Indicates if compliance is enabled for the Wasabi DC.
+	isWasabiComplianceEnabled bool
+	// Indicates if local minio buckets are being used. Enables various
+	// debugging workarounds; not tested/intended for production.
+	areLocalBuckets bool
+}
+
+// # Datacenters
+//
+// Below are some high level details about the three replicas ("data centers")
+// that are in use. There are a few other legacy ones too.
+//
+// # Backblaze (dcB2EuropeCentral)
+//
+//   - Primary hot storage
+//   - Versioned, but with extra code that undoes all overwrites
+//
+// # Wasabi (dcWasabiEuropeCentral_v3)
+//
+//   - Secondary hot storage
+//   - Objects stay under compliance, which prevents them from being
+//     deleted/updated for 21 days.
+//   - Not versioned (versioning is not needed since objects cannot be overwritten)
+//   - When a user (permanently) deletes an object, we remove the compliance
+//     retention. It can then be deleted normally when the scheduled
+//     cleanup happens (as long as it happens after 21 days).
+//
+// # Scaleway (dcSCWEuropeFrance_v3)
+//
+//   - Cold storage
+//   - Specify type GLACIER in API requests
+
+var (
+	dcB2EuropeCentral                 string = "b2-eu-cen"
+	dcSCWEuropeFranceDeprecated       string = "scw-eu-fr"
+	dcSCWEuropeFranceLockedDeprecated string = "scw-eu-fr-locked"
+	dcWasabiEuropeCentralDeprecated   string = "wasabi-eu-central-2"
+	dcWasabiEuropeCentral_v3          string = "wasabi-eu-central-2-v3"
+	dcSCWEuropeFrance_v3              string = "scw-eu-fr-v3"
+)
+
+// Number of days that the wasabi bucket is configured to retain objects.
+//
+// We must wait at least these many days after removing the conditional hold
+// before we can delete the object.
+const WasabiObjectConditionalHoldDays = 21
+
+func NewS3Config() *S3Config {
+	s3Config := new(S3Config)
+	s3Config.initialize()
+	return s3Config
+}
+
+func (config *S3Config) initialize() {
+	dcs := [5]string{
+		dcB2EuropeCentral, dcSCWEuropeFranceLockedDeprecated, dcWasabiEuropeCentralDeprecated,
+		dcWasabiEuropeCentral_v3, dcSCWEuropeFrance_v3}
+
+	config.hotDC = dcB2EuropeCentral
+	config.secondaryHotDC = dcWasabiEuropeCentral_v3
+	hs1 := viper.GetString("s3.hot_storage.primary")
+	hs2 := viper.GetString("s3.hot_storage.secondary")
+	if hs1 != "" && hs2 != "" && array.StringInList(hs1, dcs[:]) && array.StringInList(hs2, dcs[:]) {
+		config.hotDC = hs1
+		config.secondaryHotDC = hs2
+		log.Infof("Hot storage: %s (secondary: %s)", hs1, hs2)
+	}
+
+	config.buckets = make(map[string]string)
+	config.s3Configs = make(map[string]*aws.Config)
+	config.s3Clients = make(map[string]s3.S3)
+
+	areLocalBuckets := viper.GetBool("s3.are_local_buckets")
+	config.areLocalBuckets = areLocalBuckets
+
+	for _, dc := range dcs {
+		config.buckets[dc] = viper.GetString("s3." + dc + ".bucket")
+		s3Config := aws.Config{
+			Credentials: credentials.NewStaticCredentials(viper.GetString("s3."+dc+".key"),
+				viper.GetString("s3."+dc+".secret"), ""),
+			Endpoint: aws.String(viper.GetString("s3." + dc + ".endpoint")),
+			Region:   aws.String(viper.GetString("s3." + dc + ".region")),
+
+func NewS3Config() *S3Config {
+	s3Config := new(S3Config)
+	s3Config.initialize()
+	return s3Config
+}
+
+func (config *S3Config) initialize() {
+	dcs := [5]string{
+		dcB2EuropeCentral, dcSCWEuropeFranceLockedDeprecated, dcWasabiEuropeCentralDeprecated,
+		dcWasabiEuropeCentral_v3, dcSCWEuropeFrance_v3}
+
+	config.hotDC = dcB2EuropeCentral
+	config.secondaryHotDC = dcWasabiEuropeCentral_v3
+	hs1 := viper.GetString("s3.hot_storage.primary")
+	hs2 := viper.GetString("s3.hot_storage.secondary")
+	if hs1 != "" && hs2 != "" && array.StringInList(hs1, dcs[:]) && array.StringInList(hs2, dcs[:]) {
+		config.hotDC = hs1
+		config.secondaryHotDC = hs2
+		log.Infof("Hot storage: %s (secondary: %s)", hs1, hs2)
+	}
+
+	config.buckets = make(map[string]string)
+	config.s3Configs = make(map[string]*aws.Config)
+	config.s3Clients = make(map[string]s3.S3)
+
+	areLocalBuckets := viper.GetBool("s3.are_local_buckets")
+	config.areLocalBuckets = areLocalBuckets
+
+	for _, dc := range dcs {
+		config.buckets[dc] = viper.GetString("s3." + dc + ".bucket")
+		s3Config := aws.Config{
+			Credentials: credentials.NewStaticCredentials(viper.GetString("s3."+dc+".key"),
+				viper.GetString("s3."+dc+".secret"), ""),
+			Endpoint: aws.String(viper.GetString("s3." + dc + ".endpoint")),
+			Region:   aws.String(viper.GetString("s3." + dc + ".region")),
+		}
+		if areLocalBuckets {
+			s3Config.DisableSSL = aws.Bool(true)
+			s3Config.S3ForcePathStyle = aws.Bool(true)
+		}
+		session, err := session.NewSession(&s3Config)
+		if err != nil {
+			log.Fatal("Could not create session for " + dc)
+		}
+		s3Client := *s3.New(session)
+		config.s3Configs[dc] = &s3Config
+		config.s3Clients[dc] = s3Client
+		if dc == dcWasabiEuropeCentral_v3 {
+			config.isWasabiComplianceEnabled = viper.GetBool("s3." + dc + ".compliance")
+		}
+	}
+}
+
+func (config *S3Config) GetBucket(dc string) *string {
+	bucket := config.buckets[dc]
+	return &bucket
+}
+
+func (config *S3Config) GetS3Config(dc string) *aws.Config {
+	return config.s3Configs[dc]
+}
+
+func (config *S3Config) GetS3Client(dc string) s3.S3 {
+	return config.s3Clients[dc]
+}
+
+func (config *S3Config) GetHotDataCenter() string {
+	return config.hotDC
+}
+
+func (config *S3Config) GetSecondaryHotDataCenter() string {
+	return config.secondaryHotDC
+}
+
+func (config *S3Config) GetHotBucket() *string {
+	return config.GetBucket(config.hotDC)
+}
+
+func (config *S3Config) GetHotS3Config() *aws.Config {
+	return config.GetS3Config(config.hotDC)
+}
+
+func (config *S3Config) GetHotS3Client() *s3.S3 {
+	s3Client := config.GetS3Client(config.hotDC)
+	return &s3Client
+}
+
+// Return the name of the hot Backblaze data center
+func (config *S3Config) GetHotBackblazeDC() string {
+	return dcB2EuropeCentral
+}
+
+// Return the name of the hot Wasabi data center
+func (config *S3Config) GetHotWasabiDC() string {
+	return dcWasabiEuropeCentral_v3
+}
+
+// Return the name of the cold Scaleway data center
+func (config *S3Config) GetColdScalewayDC() string {
+	return dcSCWEuropeFrance_v3
+}
+
+// ShouldDeleteFromDataCenter returns true if objects should be deleted from the
+// given data center when permanently deleting these objects.
+//
+// There are some legacy / deprecated data center values which are no longer
+// being used, and it returns false for such data centers.
+func (config *S3Config) ShouldDeleteFromDataCenter(dc string) bool {
+	return dc != dcSCWEuropeFranceDeprecated && dc != dcSCWEuropeFranceLockedDeprecated && dc != dcWasabiEuropeCentralDeprecated
+}
+
+// Return the name of the Wasabi DC if objects in that DC are kept under the
+// Wasabi compliance lock. Otherwise return the empty string.
+func (config *S3Config) WasabiComplianceDC() string {
+	if config.isWasabiComplianceEnabled {
+		return dcWasabiEuropeCentral_v3
+	}
+	return ""
+}
+
+// Return true if we're using local minio buckets. This can then be used to add
+// various workarounds for debugging locally; not meant for production use.
+func (config *S3Config) AreLocalBuckets() bool {
+	return config.areLocalBuckets
+}
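A minimal usage sketch (the HeadBucket sanity check is illustrative, not part of this patch): callers construct the config once at startup and then pull the pre-created client and bucket for the DC they need.

	cfg := s3config.NewS3Config()
	client := cfg.GetHotS3Client()
	// Sanity-check connectivity to the primary hot bucket.
	if _, err := client.HeadBucket(&s3.HeadBucketInput{Bucket: cfg.GetHotBucket()}); err != nil {
		log.Fatal(err)
	}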
diff --git a/server/pkg/utils/string/string.go b/server/pkg/utils/string/string.go
new file mode 100644
index 000000000..6244ca8f6
--- /dev/null
+++ b/server/pkg/utils/string/string.go
@@ -0,0 +1,10 @@
+package string
+
+// EmptyIfNil returns either the dereferenced string, or "" if the pointer is
+// nil.
+func EmptyIfNil(sp *string) string {
+	if sp == nil {
+		return ""
+	}
+	return *sp
+}
diff --git a/server/pkg/utils/time/time.go b/server/pkg/utils/time/time.go
new file mode 100644
index 000000000..c03f97696
--- /dev/null
+++ b/server/pkg/utils/time/time.go
@@ -0,0 +1,100 @@
+package time
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"time"
+)
+
+const (
+	MicroSecondsInOneSecond int64 = 1000 * 1000
+	MicroSecondsInOneMinute       = 60 * MicroSecondsInOneSecond
+	MicroSecondsInOneHour         = 60 * MicroSecondsInOneMinute
+
+	// Note: despite their names, minutesInOneDay and minutesInOneYear are
+	// time.Durations spanning one day and one year respectively, while
+	// minutesInOneHour and hoursInOneDay are plain counts.
+	minutesInOneDay  = time.Minute * 60 * 24
+	minutesInOneYear = 365 * minutesInOneDay
+	minutesInOneHour = 60
+	hoursInOneDay    = 24
+)
+
+// Microseconds returns the current time in microseconds
+func Microseconds() int64 {
+	return time.Now().UnixNano() / 1000
+}
+
+// Nanoseconds returns the current time in nanoseconds
+func Nanoseconds() int64 {
+	return time.Now().UnixNano()
+}
+
+// MicrosecondsAfterHours returns the time in microseconds after noOfHours
+func MicrosecondsAfterHours(noOfHours int8) int64 {
+	return Microseconds() + int64(noOfHours)*MicroSecondsInOneHour
+}
+
+// MicrosecondsAfterDays returns the time in microseconds after noOfDays
+func MicrosecondsAfterDays(noOfDays int) int64 {
+	return Microseconds() + int64(noOfDays*24)*MicroSecondsInOneHour
+}
+
+// MicrosecondBeforeDays returns the time in microseconds before noOfDays
+func MicrosecondBeforeDays(noOfDays int) int64 {
+	return Microseconds() - int64(noOfDays*24)*MicroSecondsInOneHour
+}
+
+// NDaysFromNow returns the time n days from now in microseconds
+func NDaysFromNow(n int) int64 {
+	return time.Now().AddDate(0, 0, n).UnixNano() / 1000
+}
+
+// MicrosecondsBeforeMinutes returns the unix time noOfMinutes before now in microseconds
+func MicrosecondsBeforeMinutes(noOfMinutes int64) int64 {
+	return Microseconds() - (MicroSecondsInOneMinute * noOfMinutes)
+}
+
+// MicrosecondsAfterMinutes returns the unix time noOfMinutes from now in microseconds
+func MicrosecondsAfterMinutes(noOfMinutes int64) int64 {
+	return Microseconds() + (MicroSecondsInOneMinute * noOfMinutes)
+}
+
+func HumanFriendlyDuration(d time.Duration) string {
+	if d < minutesInOneDay {
+		return d.String()
+	}
+	var b strings.Builder
+	if d >= minutesInOneYear {
+		years := d / minutesInOneYear
+		fmt.Fprintf(&b, "%dy", years)
+		d -= years * minutesInOneYear
+	}
+
+	days := d / minutesInOneDay
+	d -= days * minutesInOneDay
+	fmt.Fprintf(&b, "%dd%s", days, d)
+
+	return b.String()
+}
+
+func DaysOrHoursOrMinutes(d time.Duration) string {
+	minutes := d.Minutes()
+	if minutes < minutesInOneHour {
+		return pluralIfNecessary(int(minutes), "minute")
+	}
+	hours := math.Round(d.Hours())
+	if hours < hoursInOneDay {
+		return pluralIfNecessary(int(hours), "hour")
+	}
+	days := int(hours / hoursInOneDay)
+	return pluralIfNecessary(days, "day")
+}
+
+func pluralIfNecessary(amount int, unit string) string {
+	if amount == 1 {
+		return fmt.Sprintf("%d %s", amount, unit)
+	}
+	return fmt.Sprintf("%d %ss", amount, unit)
+}
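A small usage sketch (the 30-day expiry is illustrative): museum stores timestamps as microseconds since epoch, so expiries are computed and checked with these helpers. Note that importing this package shadows the standard library time within the importing file, so the stdlib is aliased here as stdtime.

	expiry := time.MicrosecondsAfterDays(30) // epoch microseconds, 30 days out
	if time.Microseconds() > expiry {
		// the token has expired
	}
	fmt.Println(time.DaysOrHoursOrMinutes(72 * stdtime.Hour)) // "3 days"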
diff --git a/server/scripts/compose/credentials.yaml b/server/scripts/compose/credentials.yaml
new file mode 100644
index 000000000..d20532ec9
--- /dev/null
+++ b/server/scripts/compose/credentials.yaml
@@ -0,0 +1,28 @@
+db:
+    host: postgres
+    port: 5432
+    name: ente_db
+    user: pguser
+    password: pgpass
+
+s3:
+    are_local_buckets: true
+    b2-eu-cen:
+        key: test
+        secret: testtest
+        endpoint: localhost:3200
+        region: eu-central-2
+        bucket: b2-eu-cen
+    wasabi-eu-central-2-v3:
+        key: test
+        secret: testtest
+        endpoint: localhost:3200
+        region: eu-central-2
+        bucket: wasabi-eu-central-2-v3
+        compliance: false
+    scw-eu-fr-v3:
+        key: test
+        secret: testtest
+        endpoint: localhost:3200
+        region: eu-central-2
+        bucket: scw-eu-fr-v3
diff --git a/server/scripts/compose/minio-provision.sh b/server/scripts/compose/minio-provision.sh
new file mode 100755
index 000000000..34e3b98b8
--- /dev/null
+++ b/server/scripts/compose/minio-provision.sh
@@ -0,0 +1,16 @@
+#!/bin/sh
+
+# Script used to prepare the minio instance that runs as part of the development
+# Docker compose cluster.
+
+while ! mc config host add h0 http://minio:3200 test testtest
+do
+    echo "waiting for minio..."
+    sleep 0.5
+done
+
+cd /data
+
+mc mb -p b2-eu-cen
+mc mb -p wasabi-eu-central-2-v3
+mc mb -p scw-eu-fr-v3
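To sanity-check the provisioned buckets, a minimal sketch under the same assumptions as credentials.yaml above (test credentials, endpoint localhost:3200; the check itself is illustrative, not part of this patch). Note the disabled SSL and path-style addressing, matching what the are_local_buckets flag enables:

	sess, err := session.NewSession(&aws.Config{
		Credentials:      credentials.NewStaticCredentials("test", "testtest", ""),
		Endpoint:         aws.String("localhost:3200"),
		Region:           aws.String("eu-central-2"),
		DisableSSL:       aws.Bool(true),
		S3ForcePathStyle: aws.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	out, err := s3.New(sess).ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(*b.Name) // expect b2-eu-cen, wasabi-eu-central-2-v3, scw-eu-fr-v3
	}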
znnuv9OOWd>PvG!U5K34%Se(3SXvlH^{N`j~W8KKI0sLZRfq;K)`QtY$_`UBx&u=@k zu&u9QVS$4GXJLWhAb&pL7{hJ&^Edle`X9wQFN079QtEu(!pFk!w1z#xP2tiN#AOGC z05=c%0v4?R4e-;=!RL}>fZH{9Z;b%$-Rmnfz;F6v#odzYOMG0lcUu@5OX?!L93)i~ z4k{en4ds@Ul+^ONa#h3R*oi-02mhtL+sVhrLqk!~-``)sUs(a+<*2BnuCA_l@Q~u6 zL-JsSymz3x&!qr)cWXecwx$5xzd!yLZzM`k#M(!Rh0C z^*<-Md;f7Qa6v`-Ulf%T4k|Kw%^|><$!qk#{CaKuUcZjhqHm_5i*WPsa`5&BU_q6% z)(`mGv;TniYrTnsH{zNv{T8O~&OXpXf4uTvkN^8EBQIwMfEM~||8vS;pZ)iGV<&_U z0+7hd+5V)vkAoKg@sGFvJdpW{zwAZ7w}!EEfWtM5W6o|4?(4Tabnws-Ek%ZZV)*KX z|GcWCeDL63U;WFU{&-bOkxtvc(E1l6tv>~v1m)IJ{2#7_a;uW2rdU|uEGLiYoDX1~ z?6=L{(}k2-^q#TeUE@`k+H60LJ-(&!;XMalfeR-#IiIwI3ws@HwUV-7ljMT!6(|~T zjEd)Y$ETEbQzWq9q|`@UVV;OkmNUX%9_Z}kh&p{$h(lI=P#?1A6wB=p{jWV)|prmDZAqd)y6l%06q-?t;v8;Tdd8;3}4+uy^if&tep?`tr{2Bpi-SL^SBJYg( zgB}My1JVGlu0hTnHEt*@n0(^zYOkg-}ac6 zO^ze2OjWmM&t;RJj<1Xp{9dRdo{anm67y+75(th~RGX3}N4r+vN0C!~YZs_Tp1~eX z2M!i+O&wb;eykbn3ma&R>=NwVhC~wscfd+^kc)yNM6>hqY%BbTRV}Wyy%l~aC{!b+ zW$~q_wWezFqrC0Nyr3j%33BzLdRKy1MOos(ilBj_;2(bTq~))re#Ag4%i(8zrqQ?<3G#zajr!7S_!;#lf<{N_F^=a}2?

)dQy;6CZn3NSbEW;w z8|NFA)~4r%>ef7k+V$)RODg{QYm0pdbpAMzP}{mF&`9uUGwj=i9MbNP{2l*W;A(F2 zh?o?4t@v5k$``FQA*z>u=AFNU{PGrYEnYS??`ku8G95-6BWICT5g8gK+TPU#NzQkE zr(Lq)lA#oT0#dc#;~no>SAl)q>Z1c05-S728<3e|t+u?Qws)aBwSx|SKs5IKPEEl@ z*j{%0Xw#gj6qNl`H&4c&WhaR|-DaNi02)(@519Lo&D#7`5{G!csrNUE57Wx_oji_S zT}V!!$iuHvB~}JGjLz%Os0)jp$j*%~w-c2dRuNgZLr`W~BZB|F8QhgkMMcGAg&0h- zBuV7O)W9wb4;Sryi`^XX^T8@$QL1A{!%X#Dt*E>w?7*1%Z%E;D2KV%M@@@&3|MXW_ z=0>vS!e);b(RhkkO-u4BF&$yfKg(F(>6!($Dqgwv}1K5bWz6s}qn zP14HBkx9;bkw?N;oARv6UeV++<*Ghnzk!P1=@DG$bGoQ|=`Pc(#yoQjZS7FgEAqM9 z#12DLTKPA7rSR3}XG<4?M1Qx@=xw-MpoH>x*4GtS?uivkUp;MKecJ}ynuMZtdGzHf znWedy zsY@w6dtJH4gzTGXt@j&QcN10vYRHeA^?1~k&wl&?)P2i2^6v!~3z?V*yF(klM!Coa zfQ^rjFLEE;{aZlUF&0SFfZO;3)PQK&Z^=!xhU*i^Mj>;*Vc42hwj0D#DSlGFIe)Yw zyC{dMC;kRCob5vVtzD13e=GlS zXW{xVskk<--}sbpYe3}x1&{%j{(r$BjsT(SufoM%k;VA0qrYQ_nn@s~Jq=bmzhQ1R z7obI43(9`$QhZU6Oik|pBJ>+=1INVx^(7o1{u?b4^1lEwQ&0b|W3U;4DJp*_^8bte zva_#UNIbJHCO?JMXq^`|f}NYcw7lD&SnBIKt#X99wQ&jqtj=rNss||P1D8CyjGmh! z`OM#*=z>tRvNa=9i68sp`>+A-QcU;oF|F|;TjykZ0?UGv#N^RvK@`5V^a$b?vL6Sr zrNO*0o%k4UX~YM^N1ef!nHe|v-ehW9nzI2s7mRY~R;nfreq&~hc=OY@mF~>GOSE@u zeYwh&@*erukJDj@C>W7p60|0I%fA7*hR6@M&WC-H+^+z`|gY| z!*-1U+h#G1^TAwpyJ!XmKVN`#qmebQAQZX18BJRjnY{)V1^$Iw_ERYEHF5kH2M%Ij zcPK^f64Q&l20jy^_}-WzKrgp|g(*Dww&K4Y5D&P(?PW|gNTPrPOgV^8bp93F#Q@uV zCi`i70Je8Q(%6Vk{+&81^!3Kl$UoMz6Q3Oa*Zo_;h11OS*MK=Oz5~x>MgA0McjO9Y zsynX$Zu!*Fq6!vl-EABMPkzEiy6~2%6TiM@)vEApz)zoFBv1FSbSub}~H z1L!y3K=~r7gS_8zf~i+wkrwj$@?*-h;W~oK#hV>}R^L~~b}^-ZMO@9r{+OFY-(f2d z1!5vOiSnh>eF2*DN=G&^p>p$fKx9V)XGw62IBQ%U#@Dt^n=133sc!;r1J`(Qyoi24 z3@1@LW16~TIj_`V$m{@haKMOx3mhDvW&ww5AN=ky>-b?iQ?V=r`(M>A0%*Z}XJ>2E zJoVl_{kA!?EHgXcbOOly{S-4gPrMFc2`ZoWc-r{s!aw)@^#qQ@L{MVOAJVt$m7Hx_^V6n% z@JGtH_Fg=0EYTMWEmEJ7Ze(u4PRj$w@agBfU^l1a8W{4DHgal)omgmg#u%0rx4=|S zJG1XmZ8K2Ef^+2s`J0=}51t}4jf}W3p$XjX6$sZ}g{RQZDGCXxelpKB9`D)C4ZvpT zYaZOn-^wSi1ptT3u(1&vV$K>9veP>mt_5rU5``vi;cr%akq}DxQ0nrS36_Za8HBg=m&$tQXf=BVeH1KyM zc{oQ4k39+2K|b+qk!5xQ2>7dC-y7uvC+y|q9F@Us4bV5XwL9x`m*M`vgaBtnF z@z9~-H*cdubpk*YVWQf27q^OEMX%T*R9`x81%F_WR%-SJp~+C}*7jeil^y*2&8i{Y zul6P_4+K%(;pfnJdD-}BX7W(WmdX}!)`cDl7XsutE{wPK$e{OS%$R6LF!Q><>;~HJ zczqdw$4MU!C-7ehj~8e!DGez96;k{Y%%A-HtxcE_6TqbH0{qQgXD5Ch38lDqc4Yi> z?fR1%6tFHdpGvy*x&;J}H_Q*Gu7l*b=fuev3vj!Qth;ENEY1Mz)@T9A4kVT%b8yvt zq$3(kMC0HA$yAAs*#vNP9=i79EY@w{9@jj7T9=vdQ2~>Z-^>XEPJ2p6^RsTO3LA-* zQFxXydf|9r^k}*G7D35;JHR!e7|rfVTy@y-BYT*^1W+sSW;PnYWXR1q8s9!?ZW&5J zb@VwgwkbGUh$wAGZVNcyP92h$U^Mn^MAc-{jcuIs7%*hXw9c!E_0~X?0muwcz4d_I zUgWczE%Lw&%7StRV#;8mY&$8<^cb4KYC8ATtN_9+^A^!%-4&qOb!*b$J(3v+P7`HD z2dw*az_HkMvzZs1p$N`E^PUpJ;Huj{kBTq`3ivl50a)=w7J!g{3J%AIzo+v6i8st8 zlF2=?^+B3+cR-0W9H$9@nr?qZjN%+WmG@wk9vA-eB{-I?b3D>i-`2d*7l54sR4;U6 zz3?5}YROu|GoQuQS2L3+Cie+}3j!4E-CCp*0#^jAStS=N5C#P?bkI_u4pQJlGZ#Zt z^GhB~0ml5q!F1pVOVSVI$P(sn&nt&aGmB)#fnz*TR(6jCBMwx3u+_@jTM8L}sAh(! zdjZ7E2=ikr0N@I+J~8=<$Kl4d2GYc^f1+Uh$!QrN$-n%h{P$H&@B&CE3jxeE%({N3_)8)@!>dY`xOx5t|DRWTEpU~mWWLr=PpnFC}&x!nZ#+e^|+C?^?^ z1uQ}e^S6@AD79xoYA}G38}23;$A#dk^Fv2BGrTe<|27bAU@-w;Ka89t4g`{Jg5ZRl z%al`0PS^rY$n32(2&riYCuC+OSY&_`n%|Dn<(dK86K#Mdr(!|#5L+#{!q9G;5>vP_ zq4V>YD&rS?E`S$?$9I78k}@Ei<6du_Zve4xc~XNJ?GHQ`O&}sO2JSU%oRiX$IKrvg zxnds9@je?J8e_)oY0HW=8b7vvF8-iiB=>vZxpeA7>0$``Vq_D3?$c! 
zeAJ;8@>!kK<5OH)on~ey%XjA&7n=niy*|ZL5$d>;jgx4J<6))Q2;Dbcsm7)fhnviH z(Pmx{KL`dVURa$A#-Q zvqI{DqxZ|r1TkV3D{{7W3t6#JBjA|+8eFKmuAuv}S=EDJyB0F6V#OafQ*?w$rDdOG z4Wc#HTB1q{ZU?xTM@9;V&0l@e#0#O+L6chB(*55K1nx zqxAGwzt5O|3nkd zSSARvBH88(j>Dl8%P~(aSg}#Y>K*7$`@m#EJ(Y7VISaD7O?TjX`Dovmp1&ehB8%ro z@fj3KEw@x9QZLxEGThR3?Tk-BOf8ez)f7;EB`IfuPKLlYK#-fIq?Q`Lk*iD}B$e!Z zs23upTsu@UX>{E!A3YZF=7Q3@>slU7g^fc`sjDN=(8WBH@?rd`vAc-Pxp(cTbArnX zkH{@oZ!CMd-&jehpRW^Y@gqu$3k;0NPe)-#s(+F%OAOU3PuX8Zi=TTf;8t5qc(wK| zI`1yB`scF>lY2eO)?XIOpkSp6qhG(HM zSZrOv0b--&7YnQ%m8S#MD!dDwq$4q-w=Zc%d2HnZ_QQR0_nE57;L`%tBc73pR6Mff z2JHLF>|Hk>q0l!`?zFKPgOYLQ!;H0~@r#-hPx@Qw5zb*LNuQk>wUcvmQ}5lvqzcd@ z_?>9~r66L-_>j|=S6ISzY>hUm1X|45Yx`2XsY;9UV+r4xC8?vd$`7sc>MTulKYR4h z^Tp<~63>DY{6{{PEkCVVRIDv~v?cH$PF3e7q+5weKaSNyC&pwLVE>TAgzMmv*g-bvoLFi^kATVZkVH$DJg6c)3H671D>lv^u*Ws)?$v zY?4z7)^56mwbMojG}g^=_S^fd$XVW4D0q#$ru0rLCKNrJt79yYCv}~W)ljN%l(uBO zM1?(UeUyA=EE8eM+fX*FxapX3a6?d&EcXv-YI@mlqPb&q-IulMYw8)QDb) z`1&8W+Fo~`{fP0uF_dm2eq(X2Vy2)!xX%u@I?_hi{-p+MyqcDUCyCQQ(|h5tqUfZr zdo`M^h6)X^v4J$PdY!6{eFjTDMoFn*^CvQkfr=SR> z_?4EI{Q*B_VR(_QAA}(EaN>2J5onVg!F^Y_;4*5? z=9&!pV7FekIIYQ&IQ3)Zhk0<*SM+nrTlln|)k3@D?XLT|M-OWCPC0_Wr>kp}fujL# zVz&&5sj#gs)7&DNFNxXNOm;pwe==GrOiO&o@{1LkLTpS?n$U^_$JD>WFD3IHT{yVY zS^=6=_i(A;!x38=io_m*<%`KekLF&Ql{nQs@?=`ceibKwb+Ar^WA|ySF)hpV4vJtq zU44KeNRH@PY>92dZk~QZUHX*AOXb(0ASJ|!ij-|MQ3<2_fj-VFEt<6=TfC?(=~_u@ zc)^3YZEic&}LQs!)tvDyxd1TTi$rx7@q5!6*z6w`#oUg#C4m`+DFHB zqVN{%Lu<2&9H)A)Hq<56jZ?>|p6+rS64+9e+`CjmQ71aYM65hyzNKd-EF}ykLCZ>X z`w6Z6vNG30OreR286{BPtCEZFXnp{;O&y0-(l(CBY_i-uE{`OP7-0$+XPkli?%Jj@ zUn`2JEG#PWT>y<@FRI=L0^MVA54ck5KRK(t2gQ^}7Fvna{#~J@omcfrkuQSeyhNvl z4wLKgz7qDIvRLnv6;Z;2z^+Huifp>vi4yNROx}jbtIwNJd-!lW$vQW`fU&PpqXU6a zdzxBXQuNlsI=G}6;3QPy$K*W8W*6YSX4R)Juu)`Z6|1YN z#oEc{H6P=*-jOC60ncXk19r^82wxOaJ@3DI?sAZlBjae91rxopx5|i>6_O)(XL}{c zLTtNc{f&EimM1T5D_`Fv1U=VJBBmL+HKLx;V?&04oe>awZTZSef=-1s{sUI7-egiD zJe}?GFedIS`g>=N>rlCeH0t4Z<9k&(wvaA!e29mWo2<1hP2{JcXveCt7RABtHA6e3 z24YO~g9l_T{(Wa8Av(ahALe{7;&?)r5JNE{gYC1dnSUHoCJd%HmnI8mQ*--Z!3^W{KBj2TH1&sXS(vy2C%xi=+QQj)o+t z=fVYeF>nd?YGP)C&)ZjIq+FGf$R2*%m_Xum)rPLn@|UtVIlubR%qUAO4MYiAaBN9z zX>xlG=crT$wq}4~HWOI;rLHCgvyM1AN^I9sWGiBNrav|FKC1c5e)$O%wrt2s`^?kQ zy4ec7Pi$MKp8i^Y3Ex!NG1RHsx%6!2KV?GyXpjU1!gf)XA0I}W+z`u7%E`e+mY5Dk zxh!5^=`R$BGq*dOjU5^4ix=r^Eg`&Kp_PU+ESSFr9_xC-6Egrp%iuKumi#NAWWKht z+TcT+eTbO!OB4n*)J${V5ylJQzUMyFcHor0*!D+$&cKj@!nn zLvuOhna%Awv-WYYpf{Dh$;+7m9q zUc1WmI$W3Ta8o2DGi5 zl@T%&3$D_OoB!Ce<{Dtop2`G}{C@-D_l~V2t9*Pj)WO7eBT2q`Ku;%LEOyu<&`b!P zn8vO4*Sn5UXHGB8QHnR6hB35`#vp1(ZUTjk=nx#LZ210@1^=&Fl_T~N5>RrsC`d-Q zw--dDbjxO-7v5gf@E$o?Vgy+VGJ0Qu%-dcOG_*AE7m~1E6*zYswLb$ zyz8t4L<&47-zf+ZaY>fBsWVko-cP6!3EHn|eWW2)R8>Xky?>GkJgFBv_y`r)Iejdo z<{|rKGcr9-bEaXyjiSrLroi!BGu5C`?KPo}AEMyMeEX1#;Sj)Qvqz$foO7@NDD z>D`@x)w2@BRvx#s)rzw1Q3Yujgn@~sA=kPHk4D66KCSSOPLo)Bi~c+fpJP-2!kBR` z#Wt8lpOoR^&ybbO$dmP-_3er&#~+SUm(W386DX~-&yzp8XoPZ`UFyd7stc}WHck4P z8eq}LfPN%eebW+94H|tvbtQcg?9D+dUT z)cF=KgY4FVsLQ1SKwhkSY*s8xqr`jrN()(=k@^mS-v#2aCF{8nh|3f}3OL5cCzHzA zY_DnAkb#}mDqgatTqqu~5l;~FEgy;*%l*>zzDFj~$UmZR5tIXG+=ddp*doOSswQc4 z)zcRX5^AYQ3{;pTNcI3r#0nn(-iam<#Gfw9gVyhgcPHT2q{8ZCM>y(+d;^Rsj*A9r zsto%guvW_TRrAQn+d!b6-EUDfbL z#kJ!DJ+-X|zKRXZbVZUGX=BO!1DJ1k_uQ8p(;z#Ek>>eLw6JuOpy98LDoIN|2H1XOi&bcXUhFHZ z@=*8EVzYIaR7>ZWYAqr0dP7Qw_Wa_*2hek;t3;ZO42R!C*e|QlTE@v^O_3|Y$Q1?J zCIasPkMre>)F<4Q?X>#%skDekEEsDDJ*-5DG*ol!4go$rNyNr*>~xU?GGXP-AZb!JwRx3c$A&hC7{;;VUWtDHz&KSyb(n_V?cZZ6xTr+9 zqID$kg5Op|s)433l9~lB>5G7F&ezt1A8XC-Z6_79BL@a5OU-CDRmq1wi8JhMk~?GP z?OnKdW2sj}!Cmbls8rT?v3nUKPSD;=`+q$YS0-NAmAu^eW5GU&KEVN!bv zvw!f{Ieb6DYFnfyQFZ*bo;Jfa3E>(F>w==phe!kY=N*vMmD5k 
z(`IN2NYyp% zf5({8g4?K$4f7fqdcaq(C#cQ7k*U_jdj8B5}Jn1O@*z*)B7of#WQsRrXCi? zngo68a8*o4nT$Nm6rGv93!!Wl>1y`l+PUNBLf?#a&koJ}32qL?%H|?#W}6GGKam9^ z%(>EoaC@P*h9Nz&G(KRRy? z(=1qZkOgUDzIC9qrhtFLB#5sEP|3^2fx32n5b%J;U2z<`fU?bRl^GHeI7*;Cj1+yD ziUl1GGN7X%cTd&igOKW22URD=Z#b2K&WU06b|i>_!3(yDRhsF0aOC~}^s#?EnF3cR zDRd74-r_D`q>gg86uN_|oteiw#sz=O8Q=|Snw|;U1-BCdFPLVK$w?5(YUjX5hR!1# zw-ZAmk&4Qz1paJ81}!5bsclILr$LeQ;6+}>sABoc(2S2YC|r@a^Zc%Z))z{JCL`hj zVR145u6}f?AK)tBvW_D+3Gb4`+1_}m!WgiS8W%u~EKh%P2%84z)|m0|G(UKlmB!uN zzusZRfH~!iLBSSoWhf}wYzyd(b|w7y$WAnLaiKRQ{+DaN{v>)A=t@c190ZE49H15~ zCO5w2_8#0HEg1~?RvM%eV&tF4^@#@b|t{kzLkCfp%sWS29YqcUzYy58*JbVf3?uN$PBsx>ND0(W}m^QtK>_a4v_5*P>WV|HsP6108DQC$gx5(0{P z2r*^zH*x&4iGh4hGIW{Vx*xRqBgz^ZlkE zR+>ph{|y$*2DZybuuD}z%HwZ+K0{E}!^W-6vFCY_pjuYr#kQNj4{?_o5fL#(xC`|eZT8amp#Vl9Y$UB#Rn1Ovn$RM2iDEe32StJ5k_FbH|Dy@-YMT*~}H$|%gCP0{Y&$)4piXbL8gXR0q z4wAZTle=>`M~2AH5EWC!dvMA6KO|Wp-k_yt4WzA8yKI8pF3Zm04fP@|rrmZ&hW+i7 zXEhY=nW;{;dqNpt(BKV-?em7`76KQ6_Pc|F-X;}VjeH#jy+yGTQ||Qhjt@@u6!ziQ zv<8wnN6{p6v~O^kGidvZ1X-ZNyZBG_qJKz9ob>Pa?Rx*SF&PG%LnrS|ngsCt<=rPR=)c#G6pLPtEkO$geUIM^LNS~UW~dIEZc9nC zecFBbdeq0i=f)g>nXG?#dm!6leGp>3H9vkUf4gdCkCi2a@>8ye-k7e!T}d7Yt|$kI zd#S^PbaA=>QA$Czs*Isi_9l*O`#ZQs!1`#p`X8{ueFC!;ru266Ut6l9!IpotHx=`| zM;yf>R0jfvmUhv_INunv}hE*F<2pMzUm4E9m!~mp@(UE$STW&Y5NtfPaPTdP^ zm7z-~;uPGL(D)c&Z1iCb_=1BUucL`^@x&gw-`1QD8YKT7+5<;RpYCXRyn%#o8ujjC z2krdTCxM-O^t7-<8uz!6v~Z#B?rw`o-q(LurV&7wiC${gw`7kIXm$h*>@oQW6JY#P ze-Z(&rI&c^{ToL9qPg?W0pzs3%M$03Q^qxc7GJqbgjC?goooo}+tMs??FZ8qbze+F zLqqR{V9@&!>2kea*y1=m`S@R84+gLk`f4@*!Z?397f6>6Ff3yU2_3sIrW^m50{BIi zvcjB{+vkE7F|uP(6{`N#-$3-nWEZ;=z$D+dJA){JbFm_YxBiC;n|pV#5o6ns61^_f zp7M-x0P7WnD$q|v&iz3X^Eki(>ZcJ&THMG=!Y}ID*@EPG&rjuLfA+ z`?aFVD;NcE;WK|Cith+JQJ1HQ$vs&x!*qXOXi|Ykc=R$n`4fGpW?c#mAe6nZv%O4b zvR{C})kt=@@-lllpr_9-dg7XNszRzi89k(T1T!jKv0yF$FS^i4y+jQIWg~94{src! zy|E$=F)x@Z&kUh%JCmyz1D$!^qe`c;s98!pj_*6 zkjTuh4D&5Pc`k=IiN1Ihpo(Cv zsK<;vm_q<`ht9N;sx8f`=0Q40@0g1_xTCiLOx-5U-T$8}{@0H`>|IcsE-P{&O}JeZ z@$y5o0XM)CG>RFZ@ID|dZdn$3R9T#@)g^XPc;S?Hy)+v@5flxHU^bNU+8l(o1HHa4 zMj(_Au_ng&fFbC>rN?yH#3rlR|IMQRNe}>SKVTjIl*k;hn<1#cG{JVorg9^HFe{{p zn`x8zV!=dX!N~x|{w!zL6ChMYWwMd#RcGFb#cl`apw8Ysbb<*To4G+pf7Mocl;3X! 
zlGtTSKg%tQVIs-lWPXbibP{YJTw?Mr9A^p$X!bZuG*PHIuR0Qxt-5gG+7?&npnjLB zjblPOI1EsZmQDS9LLO(kot7)gGwvUy)*Mo^o305q+-za{3pV~n6XW|JsEsr6!An6X z^Dc53ig|mjN6$W@W0WVE6Aaq?MT7r!j}zd)lpRTm>&c7WGEw37Cc_L27sQZGLazMH zOyxL71SDFjZw&K+6g(<|jWpbq5uu$RlOfVvQt+K&wQQZ;PJ=Re@`+FCML?>G#$r&< zaHz2#Ri&3Uk4yO^`w2D!jjhELsSyCFDrPy-Na5y^RAZ_Bt?gL5fZn(oKjXEU;lqZ; zXu|E{R#gC0nR@M&km(X+n@;9pel`0qvXz7A-MS!3{blA$-UOfX_#k4V$PEX8Lz}Fy#Oao&R z{xkL4G5(4x3~Jp=QtxUl?I(Oix@hQDk%hqJWvl8Gl;(j;avUffqK(dopHx zmwB1axPl1m*F+9d%M=c#2D?NHan(ujS|iR&#XT0}D1MD(Fyjqv;Es}C&jxlwKq!ZI zqMvZHh}p~u{e)kc^4B;h_oxV4R*yD_VgFs2aL2(%rzTu|E5O&=WV_}k*og9qcHtF@ z83@lgA7+*qT;9$dNbT1fQ?8(wqWYeEkkcrT|7|e>)DMpdI6VMNq>v??P!PHppcYCI7a=q=R~up=B=*lJ1yw7dHsfNz zNl<13e01=vfv4@C`W#?>YwJNjP|p+KMeu@>TQ_b+-?%dm{K z?s<$R(@Q!>fTK_uQVuHulH+nf=3mtCRyj~~C{^R|HH4!<=U)ZA4Ee`rJQjGaE#>tr z3(yDQ{_54NGgi%)_3sv~`Mz4bsZ4HV|7BU*Ey6*)M*cudE;!PT3v#=&*zfD`+s8#> zc~uN$7w!yD-Fo2AaWsenB-PW}KmzwnY}X(-UwNe8Z9-a>lAN>9!munR0XE0M7O8NDz0GuL3k8ftv7@_*gUZkxH zf{_gL?#pkfJwwiHf>4G{sXs=}m6HPu@)AysVa}0eg4gs+G=kL9&MD6HpQ!Fy+V0sP zNZ1J*CmjYgYV;zeek@X|&Fb4uy44xIR};mTN~8q8Ye^7l;HNbr}TyjCa8BS#fZKO|7(YJVxSr44vf3;0QH2xA{ zHEIXwlKrRrTk~&Qb-gNbqV1Uy=cvDk^40V@IZ~x|fH}&n5lV?G;k-}oUH#Fw|M;C` ziQvSMrAcsULKAw?%zFKkS<*aKXeaVaD?aeeL$Y+&X#xYzAYK41C6Fu-gH-GzBLmbd zp;vQai?wNqE@dkR642j@Tz;;7s0t>3&)Q$5Ht&~&gn4~zQ)dI64P#Vwerp53KQUZi zQSZz^!_2lXc+@5BetLJ#l7UP8fy|13UWu(-aWGk)C#xU?wHA6zU&2z(7yZ4M7B>6l zN&T8K${d9cK`m>KkBE7L-U%dZ0?|&d_YE$QI(CqyEWzg=b(pxaXp}ltrcR}<0pz5?|t`I6c;0pQ86P5IRt@bhIW@c|UTQkmj=Sk}`SJKz4nw zLRRR=W4hrQQ7>a&mItX4jGj%j{0^Kna(DGVV;o$ zwIma{lWQLi5G4rD8%|byl&A+NM8>Wu38662poF; ztIO?<9B+blMr@yzsW!7-h~2ME%6Stg@DfHVd$TYHld{4-S>Q|BVMSi*x?SI;rw3=HL=-u?M1ehNM@UiX=L5)S|SfFO+-W(A)Z`6BPC6KMR;KL-9vt>cJen`)9 z4kafl(PYoH(jxzMO>fmDVC6Ikl@Pmua(lYBY?9^#1K2B^((B8kYJm?{5%jWFhvg|E zwmX;R9XagpK`UonP+d^a+XW^iF1)8xvw!Q-TKi zprH{?W&4t9d#QnqSolpZKzvv5KvJ+etgj5_**8l{^*#~%5nF@29!?05;G5QS{8-mY zt{<2e;r?Fze+awIfTpshy&_0c5gRICR8&Noh>FyJ4FxG8iqe9x0!oR}TM}Z!260u2 zAS8+)g0!W#gcSh=B=inZkQPd4A*6rj>grnGZ-22dH}{m8Idf*7dBzOm!ss2)7@f_( z%^M7kCW4S-63saCAyfrTAeMWyBR%;0xUS-AaXil82V^UbIQ(ml*o$HmsXS zl4JKGbmzN{kbB1->%qV<*3(hE)$0J>z)tGs*;&=Hy^l>`1Kd8cL=pmy*7R7oeX+e3 z-n`H+E;D@-e{A8kaON{PX&ZGr+MmBLbH0Kc_4BfMnwgRf?ZS5l$DHE)G#Mgoaea=O z-1Er9e}d~~D75UGYO2AdcZi7P)ORU}+}wHKqhxKsE_kqTeTCkE>@)K`!(q;I+Qpp~ zLv|EmZoX`TA+3K_5;30Df8bHxr*gCJ&L;hKXji-r3(d`9tMAUQLKu%uTqL#cE4D;& z(A<%+!l5&C_C(;F`Q*+mTgmDp^6%>L$>D+Z>nB{4{NcqMp3cNP69f(8PW)S((r_Ld zW5OS*q)$xk#m&+gOVpFOdZ@Ld473w!_$9Npm7%pIg~T|mPGxi7?h5*GA}t@ zPUmThjAWjuqi>B$qagDa#50%0f$Fahvz+n9;F+9#|=<<&)M|pNEoK;Mj3I%MM2NmX)IH`%1ZTd#(fuMOY%N zh};|TiYh>`@3I1Y?S8&PWZ^+1fjzosAt97#P`5bt_M=(Ji`BGN_-UaT$=Ab=hA)rb zkzcax<7`#jowKuKjvu{*MoC4Jh%6KKnLZvUW+iXnuGE()n;rTPoLJjr*a2tRhZJo> zCRWd8cU038X@hQ#+fhn%7z(HMm=n_a9HTvgu$8KQO3r>&ewFXGX2dGE+vhXhOiR@T zl6w<@Y3%`$*Q_(^pU!xSaX3M5pE|YM@?GT|eZWw7GwS8v_X?n0!5^;aBC)7xqvD8# zR~e?g-elcTGya^JzYIT`Kf`ySO*)O2PHJc4*GDM4-Fo<}7Uqu9PEMLxc&`@PKWk*-@-{@?gVo#8EHr@Vh#75 z6zuZ%yD}C4+vc2vnL0Y`OCx2|c>X>cB|i>(_ZJE$be4wjoT4+DZOm*nHlBEz(0%7T z%lno-H)G2gnsOL&A|Vgs-&nO!Zt$>h-cDm}jzH#^%&O`mWrIlzCrefJWPr~pFU^7( za2qU_Lxg7&f}T5^jqs|I8VOgKJy?#tIsR}LTPY=z(qU*$v8hi`MoUgANH$0WyOUq^ z{p5@%@61v=|7U9S1>_(|%-%&^YnOagJwCfb(snpYH!6*P9o{RG?;kNnPc3&0A8z3cIW(!9`DE|E<%+Bzo7r%oamnf_5k01RBXgQqQq|3 z#+b_*%0#o- z9%E&vNlr^VYPUB&muWz@5)J4b#IpdY8wEhyP7qTPRps76md;f@#xNUhfBbp9Y6gX* z7gcT5rRZ=3#jV!6!t^g2vm29D5T2*g9&P;2Vu6Fl1Omnz$?YWISB|f1JyN*w99n0I z&;&R2-LS`+afUYXboW!c#^f^HZXtbsa}yr%X8p8~h-%H7K2 z*t`_VS-UF{-PZr0tZ$W>&gG(4Gh?;VKVM!f4}sXyb?k};vqh&O1)X;QB*^A`W1x%< zRRCH};!=5I-PKBLkukHA3a=$C7?DE 
zzsu`dFllBmX*EN_3NUwM0#qiR^IWQ#*Gr!Wi-I6<<<8iG(#v~g3my0B`UfdgDzUt7 zF)+c-mrPI58i4@1($S6(&R=qd7Ijpd#EOUAzEki2nTzl*b>jAv z9>P$Jg7#JI=HYZ^-UOkF%o<54$h7=JdoNGIxF;B8uRv!2vzPd8??-eNoX;ftd#J$J zjW~3!8%2t=P)SGgCTYCMV&7Z^8Tia!I9sA8QOF>77HTX|JenL=Or5&%{Urp#|HD5t z@-)aQ+iU+cTX)J=aA@laO05LiNexAf=epR|zc{P%DWbG$#Z5Mu68OG9XdR4^L{lEj zLh~nE=c07oF3_5+Y3KSh&H#ib?w|1Y+6N|O{xDpZ;L>6!;fP?OkeU6Q5YIlT&0eG? zT8F`+;Fk3tFq$!{?T@;dNvfy|x-ZdF{|FZ>O_#gJA9sP5yhuOc;%Q~8eqh1Cw`{J- zIIbSnLbP=$dN=44{I37%;z4tGM0oz|y@|NNm9#{-u%e%$e7aZq+grU=hPOYsjotdJB56@Os%<8<|5K}LgXcUJ{z~}@dTa` z&EA_r+&|A_(9U`cv5^cLt4Yx%wB1GTlLG@Sw(?Zqowzc0QGbb5NIE_HK3iB1PdVg( z$q*SaD*-nn{i<~UBnu3hbpo^ygBA;Gvb;?WG(Ihr9pm6#Ay34;mV?4%AB zJKEIsW9AwSz3J+K5%Y8T+f-y~yC?wz;c*9;m)7O3{O9zBl&)6RwMt`KORh9{| zBW}$y=k(>9hlr2wMAN$SN)~ABQ*IY7mkaNG^54}zCLbW1uDmidGt1h2N^+C=dhD>7 zf7o2LeQ%^`!m0)<=g>XZfY5rc-ftD+p!g~{2?b7QzG0=&(wY#VErJn-nkf-$5C5Sv z#!jeKV)gmy%rzXeo4Dy$$?bB=M==iyO=}6vi@f)~wVql=uk_-Ocw;2uXyJ_e{QluL zf60w8O3A(^vzn7un1!hibaqprcr|e5M<&7h(qktrB}aKFyvaXr(ANGph%r9!M})YrQ*(m zu^(yF2z?HAVT;589IHl%Z@ia*-LYXs&S-G0PE;|%US$&7cCzM4aG;eSU9uw@HprdU zx-#!|OG0*T$y_!cwAFi)e0KC|RIF;d#*22flX^xk*P5#g5*d?I-EJlpO+>V~ZN^8Wt*4t~!0?Oea&d0+tv zh7RN=>&tjnHfD0ZWk~t+q|B|bXO>`S()opk2kf*@SNIujf*HSwHeNm%?D8}k_h@cR z=5+K}`1(o7H;j5JZXc3va3$s}rF&jvp!Oesrj}X-QAeF}oc?!2dkG^WsonGv5N2)c z8Q*?)9edm3R(2(^86hX@vM7bzUcce)|C~dIGav+;WpPl(f28YSF<)uV>to9SZ_SJV z$-Eeb+A|MZ&A@XFE}QjR3DF~A+RK2~P@W|qb%#_I5v2G)#5L1aY4R9?bENEJj!lI{ z`(&X3LRyXIZyT&;cY!`qgu9?53CqcqD~)JBv1J8qS*1&3gR7x!flNkqm~g+O`j8Vw zr&_K1of1vzZ-)4!Hz^1wamTe$tUs>!YWrw zZ)`knaGcMY8{(g@S;XyoSF$0+oU3wpUHn0s&I29tQ@rokz*PYWy#)tWN3~uXXv3oY z)MNUMIxq2^WCv!U0yZiAY*<3(A$wQtw(;6_dhvPz<4QhhJorv4JXS3>NKF(QvzPy% zU$V9ra5S9LEu~q}t`Aq~a`t(Ltjt4G6 zfzBHqwTrlJQY9rIi87zkR5H^vJ=mV(>2!@I*-||-YZDnWxbX>Fm(laU@mRKVliyB4=1=2 zcpDSUbdzJyxxiOz(g3^4Bk|WtEVDB2DL#Q*YM}CoxMuN7PyrsdKuqUS!m1ExM-! 
zowgFDx6a3mg&q#~awpAtK(w22g5c&9FkW`fSNA5IkyyF9b|Big6hjZbGb*2%l*>qN z%`N0Q9v^J}hMe#c$Om8^Iqp^kwN-dd6G&f@{gc#nNpH-C#4dvBfVBdW4RF>!2`{{J z3bWiG7q7fUh{zwPoJ+T2tM1tL7_WsH1k`GooE6L59R!;mhz!><9>4;!H~Swt1H&!+ zVel|TJ8WLEC)!w$e%W?;+dygG%FVn&R8L94f{+ztO0XV6erqt@cL;h>1-Q=H%=ivY zu#-C}tMp7Aku+jY@4l_|HTFq>qUiXVCpqA3fA4oC@2nZ zi6!Ya!wF#0{u>K-5MXO^&sB?$Nbe&Y^pk%z-Z#a1D~}%!_M_D?(83U+TQ`8bh$Dy7 zX`*G(J-E)Vqv4xYOi*=)9=Rlpip>Wr;`UYPP7W0Ic@2bX%zGLkSWVIUJnk$Tl#rOy zEr$>vxkop>VsSMxlTnp3^WwU--G=5EmQ6`hWxJI1)~9LzT9-uv4^{xzedX=N)KARq!n2p9wn3)Io($cw*x_!7^C5Wls?w4mWh%%R|1GDQwlujae zyYyS=OfrTzHn&i&AJ~Q(=e#l0yRXM;#QDie(W}E2IU)pUflJUt8cIfH0=J$q$a=~X z;ns3*w&S_uE+^L=dW+Ka8}DER^FK-IjdE$s#l3df7+=(U$hFy`m+3xNSZ);0BdC4T zUn`rl%76nAoG2^1IfWugX)!~;2Vr}@!|-l@*s){gaNn64djR(6eA2iJEa3PVfVIZ4 z8yHa{!^%iXS`(2Wz?u&aMB&8vBd2n%5bWADf|NTSV2vgUs(i8E>snKr94b;?n%Zyz5GgH(IA%=WIK7k z8D#aOL18hfCyZC;@1KN!=u93a@X8GsDae8L>ku{hC-tb3?BKN5T)ud@J_>j^1#{e} z#F& zotjVq*MtzqlQ;WTsfT1;>AuFjKcws(v0t=Pk{yVf=RCa1TyH{3(g+Hj;-TfHpA7A;#1XN@F| zsrRy5qj7hJMd}hKCk`wW`n9ZA|4^{Ah&WP9xX*4vVjlAw4oajmyi4*K+7u|3n|pZE zqzQ?it~k2P%2${!*+62IB}lgy2p#^f$o|O6GwbF@9>|*!i}UmIX|Zk@>*rTj(mNT=4&4ec6l6jFj8OHYKeTMWytcKt<`os%){G(;8MJ&DLw=!zN} zoftdqnK)iMpdMj2i68zTvIDs6H_t<+~tepeOR*$Kj$XQ2UjBy z@F_*VfAp~bgo;|XUPSmxw&N{Mi6^xbhe7O*YkE$KYWj=j>3+wmh*!#;O7CT}?b6sH zv}A=%1blKvF>sYZ9D$^p6JuK1rwhoiAiY%-Jm63iW56GO8$LQSuw?E;^gX@RfU`0c zuxez!+TnVzThMWy^TNqkL4fV0dFj$NWqMt%Dvj|KMM3Bj&|WrL!uL9hywjBn!a1|B z)ENr~PL?9lL4z0icI(Y%nCip+38_oPfpe~cWLuX3ZwREYk=Iy~4b_^{Z?;iyKjPGk z10-0EhW}w8WOWt}%>Gx&p}ZQ$SGl3+s6=zFTcy{0cIts^v%%4AN|zDb&CLfs>5lm^ zY@s1{Z^8W5xx>a};s`k-Qf6297fgudp>pIM?+qdb+{f=#?dFNb=R%Zp`7L*V8wJ}Y zz+gl*oS#i)n8&i}6khTpT?6vi)QHVx9<wTGQ+RE{E?|jTT{%rb348WMGT}D`z>d&EYv|K(ZVX z1mHWO3Tgf@R?M;gL~TV(RC%BD>63M-s?me1?p*Ml-O8TXZ2!exisV{?;KeXjs)daH z!?QE4>51__3r>^?Tex!z+&^_TDD55*;pQ!yO&dyM%Vr1KFdrWj-bX7qKLbv_w{fXZ zRmrK<0q2U??cE~7PWie2!89CV1;i_9@m8cf?Z0sBmN*l*iLdXM_fi!bxK?$n`NTG5 z@M^z>Nn##5NNus+MpY&Md_b}{kM3Bx2&2+I7viXeq@mDBKBlBjMp<<*Ngcpq>p6fD zqsxo{ zKSz3o7em=*VxrH=K7*)pkjPG%^e=emOl6xNDR{S)A_~Z}U#Nh|SlT!+9gQ+=N0yH& zdm|>)n!j);>#q`Gy-UV$|76f!n2ZuP@zt_9Hyn1vOWws3n5Z*Q9< z`yqy#X+{dpUDfv))8}sW{ZU5Qy`VOxXVl^g_>z2?6f~^3Fmq2F|3!3-VX&?OSm0+{ zTcIkhAdwJ$-Xu8ztTOI$v(cADUL}-K7?d7kLXxbj9&2?x!tJjTWN-Jg!GBFxA6YTN za~l$o*YXJh$1qK5(v+4uky0d+0;f9~Ro`(a;{+^p+^FEvIs)PmheP>~w!9$?6r0}T zFMJ4Rx5F*9G+;1t*lj1Lvs@9L(-Q7*Uo&%){&;Y)>p_wOd|qJyu)0M%1~@PNIsL)NiX8#p;W89 zp^R}a<&@yyi6!ssaqoF`Ovkm{H42sY8D)$l97?jOI?)z9Xj%)x24Yv4XE(@Lrd z_+-u|mp1>qq<>&N)Y4e=#c@m4&y>*>4qh0D4Z5!452V%kxzugyca}#gC z%)JhcyTrq~Sg@|cC$6!j4LwV>f>OOrTh?y_H!0cSIh6v6<}18F;qIs$A zKCaqtuo=ai-ZFZE3#>TT&K(q4>q`sP+m1{QCTNI^lpHSo3fG9q zE01KiH>FX!3#r!GGtHG-QYdBZv{dPx_Fv=|gwwa9mR0FN^KW;^+H+Y2a;>vUc7WmU zl)E(P$Y5zWH(EKnehOqkd)#1+Ju`#0B2!Hzo4yHN9ss!22bqfvP$o-^)Lj6!6t7t% z4(=HQ3lPybbtpRc087`{S^m_;dwagbaT_eK8snq9vOQ6yAfo-{kiG^kSw|4USAa(t zm0W)a90RMIr64~vNvPhJLiH|7557JZxluj*%QSNjJG4(uCS=U@B)Ue|1hd!IbD8(K|2`I9P#@|E zb0}Q`Di|^3zt0Y|4k-!HA@+Eem$?u@dd`adq$XYI3m3B4M0}3x)lWC$&u83-d{$u^ z+rN45W)yt;*JiJIYtGo(j`IdSis(u<;X+U49TcLUl9rr!1SpizU9YGhCG_tR`T39S z+p@CUEcU*D+BSV!2m%Diip8Z6h(n80m&o;K4@l^3<`4z8=%FFKO8XG7P2PTWuWvC9m^l6B~cJ{|_B z>q`>PnMgKhxa<3V)zF_F(073FstMrbMzln9$H3%5qM%} z_z|J5Eh(AhoAx{t6=2<1`pomUD=a8-9CBk%)H_>lPeE4gug{SJ=x=JtceUo8_Y-xX6B*o!7i4E^j>w#j zJ?sWrD4F{K($W9E^8KP?L4%jkxIMAzBXdT7udH=tPNuAkIQz!)v6~*Dwttm)~ z6gHHIa>}UwALDue6^XXx_~waHyVXB}0M^~Sg#WQr$3e+DT(`TeMJWZzGY6HO@T#ES zk?@1n+a(Ql&;7A91{hQCurvCUp6u4&G2=jHt`nF!7i3?Dq8K$!BNtTFhrihP#W(uJ zX4HXG3Xb4c+1Z&>9&cXLpy8rKhmst>nJ+;LXcYDCVJB;#QG9c;kums-7!#hwd 
zsNp1|ND((c`Gy_UNAzYl?*854fYl#@q*^06a^f!nG3cNiA+s;W6=D?mc_g1c(a!+r%dcIHnk)k~ zc}NjJ3Yg=6{Tqg`EU1>mFu z6yl#_9KRKzU4VQSeza=-5vU1xZl+AY6SEb+Hx1t}*E-M-4u1E(Q!*&P!EXfM;h_AB z8xik8#%pd)LJOxZRh3!&+X7s0wHH-tP5-B>+ZHR+LYW;dx!>Tqdq9bRznf^iU2X-X z9bhXia~s*Z8xd*vpy7+fjQrNOw#Su!J7Mf?K;w>R#9naA@iHf2{B4A`w<8yEKPt86 zdSk0`s>zrIl1dVVnN~@Jy$U?1nS|k44#xzf|9*9)rs5L8rN2zqMg25YY!ma-N_h zTafT90oFvgjG8v~nBZJd9w=e|@aT7vWVLF+=zpWP|@1foGLOtixNP;|wsG^Y3=eHiY-S_AGL-`3xr$#q5!#kXo+ z27S;<2Tgb67opOK{b`iWvx0MHAyO2b-x@q{?Pvnm4(qCHm4=0i#Y2X?zc1>n>cV9s&UQ{?|K!*V*pzI=8@> zU;>o_yN#4R$|=`b%<7?o`c%J=af#Hy)ZY(T&>3{0!>`X1H!&tG1J9$a5xP`aLxR{M zl&3#v=Q0y%H~Im|BA@*xiS*-QTiWQSo|u?!EZ!^>3)Sj}akgCQI4+n9*nG4(Zf442 z)Y?t|MIwJxj!3X*R(`H~+%2)&mw<}o6C^PixCu6O$y^cne@a*i=-EiDNlf&R6W^l% zijr|U9MhPm`mJEbJsS3H4p^l|`t@VnB5f@Y`a} zR8TFN*`MOt93e!7ChD)l*%?F7?*)B{|DQL4PPvFY$;O0RqAhA*sAld9r}iwu7K1sC z{qH&62Xiz^e4@NcD&-`oEXOZQrCkyh@Rj(Ntme0RYrY2P?WrRNw3D}+hY^4aoA<$L zdYu5MbN$S6^ZN&dz=La!7)U3pm=8h^(m(iPB?V*GHVkJ#y_A1zVjTLwNfU?*9U~tn z+=e8{xq^1!pva!R1hO4Ol|lZsfdQ8j9dOFdw)jsx61Rl*ksPvMcS#(ZEd2j1P7Kr~ z%YG(9TLZfjs%0-)MiOU8x*Gq#`W6WIMuwkE#(GP|xIl}_o^sPaM)3H5_rD4t>)l}N z*J{$#Jpz%EF^1sb%(^MJqnjJr0KF0Me+rMY0#LN1e794k4pESzdR0)UOY(FHxXuWn0N6=g)}A0O>AF9k(6uWzl2%kPHXbV0Nf^kG7!^_BP|DHHp?^X1Dq;EV-c)ZlHg<6EFv z`&_c=Qu=4sburm(kE$VLx%G|M3>vL=YNl58IgGzhjbd~H{~mix>@_*=o}^ zg(%$pz0}8m`CMJbr~NI z2wvQN?bFX|RR6BJ>64y3^Yrs=T`=^Sr-#5y&J#uA#!>G-98?m;o1Hvw}Aw zy7r}z#%|=z36l!%Z%MHdsj~s`&_|$?oLSWg$m?>ws$Z8(nk*h`diy|L*pC5rdoiOE z1cE^P#ume+J2#qbIEDIdLLl0NK5#GfLWcgeNj@3ltES5VB>^;2(Y=TS`=c%@KCBXq zzma4;W^TTfK0LH@ehvDs;FMZcLz#Zrr<$krPfV2RkH$Qf=Bz=}B(}a}yum;NnajWO*ORlg+3sa%q zyC$mx*xi%_BA-h`Y9{IrF)pT;s`|1#1siI^O$h`x)c&MHLdB^D*2}C;?YqKvG0=Xt^;mZaj;x2 z0G-i;J_uofzP3Kxlc0Q2%0s>6;Gr`{9)AOTORA)Lp36<>6Y$txO?+ z0Bru%Eh+f)f;4dLBuoli$tlLYckkvu$cyxGq#IsPm@v{94{CN-oxJ$%HbOz9I-pzX ziGG_QLjhnS@WAGC7gx7PBz|1?`E>x|lga(aH_pu925@uve@+z7NkpTAT6W5OulXPg zZ0c~X$M~!?=q57^hT%{T0;A03fUQFFB_3&P`)?(?>y>g`dp%56k&w`w16;8m5Ndoc}zZP@;s9kcKE#asQ6kUy*%7kL?6LTV_Y7D@1=9 z1TqR`TUyboDVg&NuCm|H_lrBcA_fqmqWk3Dv%nxa4ggjrA=B&r;3YcyV}D2XF$AJa z_}M=Sb-+&o@gEU>+u~G0AZ{FZ2GQ$%{pD}|aA*TFIeVuBN=4lOZCfR!x8O#&=n zU5yOQaK6P4E`mCZ%nyVMzpf?+7K8vA3(4Pnu@g)PC@%qaIPGk;HH&z5;DyGor`TYG z&CZ$6Hgn&WV+t+I0iaK!25p5K3{c_1X3f8w=5nCW+U4Fm!Mav~d=D@|A=6reb2nf;OLI=&kT*g6ijkK!F z8LyavoL`{+PF8mSV_3TZx-d8xfq5wWvoEDfgo>F+j-!QP@bF? 
z&*41q1{wcvaJAn)`6b2{1X_gL1rMk%K~4{XH*5@nSbcvnYY^2^+M6t|1(Fe1O_9>9 z$4$C8)ue#aB6SA*KPjp55jc;@qziXN8uTdXMsnP2hNSw3dF5?ajK%)(ha{$Y8?TV3U_=+5VSc0 z$z~+mO5c|SZ0?Va!Z&YPcN$o#mk$TPN9F)LfLd>90*ZdXEpX$nWCeUHD-RfHH&`%G zSAbO3$2@v<=Qaq#Del`83s4%rl(2lDT}Z4G)ZqE=&`@x9%}52OC@9;q8?C#Znr~k6 z&3L|BH!OINtpz?R-v<`WBXm6DZn=}H(zD==QY}`_`g{IV5h&sKX-oM_{YsD=0v2Ja zWcq3VXzwG$8>X8x)j_r$bPj$fo zdjzBgA3bTfCkb2~xhtBbZ34qq#V47He(9~n@C`pY4v+%t&q7gIn3L{SeTSt$Z`Zk+ zz5soGh!z!3)K0E0EbOvl-luq$82pDz_-5t=F9F`&vqhU5Hwl1DJ5XYVNFnbTpbL3$ z-_k5nC`#3j{EU(VNG~F*jlbrzza9J0V@GLe&~9u4v{a%MwMRri`(L%YT59?Lnb^As zC6jM;6s}+E{Hr}q1)f-D%~4ljP@)5}u4I(R9$@0G027z$Hz|-wuX{@JvN8g-ckC?J zU+#A|Q1^mGf5-g+a)%-#X`1yCM0irQ#Phi#hGh=Q_hKXg08t^-}f8KC?vAO9}oPcM8mP%=c|MPTq#p_SQpA+pYl|CQ#tQZ|9{M&Je=s z-80II)Y)GhVKLXshe)3F{izwy1w;(2h#~Bny41ekNsb0*Xd58EpmKoTd0mNrmmGy4RG{ z*Iq0DTwH| zYon4)hPOKD3~qJ(*n-9Zeg{7-?Dh)2gAtxbDz4z}-|kGqWQowd74WNS-&YU3kean8 z4?rlyMv082wv7&tgW?Z5+y}r-=LSaoQ{4GUr=s9rZ;-DfXcVI&mlO|NKVY6SO}33B1p^Dy}~a2h`PqqY{0G1jn?tjBjJWcnBEF7AdqfDjA>5Cbhv+!th7z( z219UsGPq$$t10SY*i_NiyBVr;MN(yF=T1p0Oni6${P$>a&;})zpThE3Yv6sz!$UN8 zk(ku*6?)EfaQ|YFiA=?)XQVvucT)Dfg?^fcFbmNd`HbK-In*$P>N^rbA9s&yG6Tq0 zg=QFaiLlZD=3=R zUEc_vxd!yp`y9KBL(9zt4pcpfsnQyWbX?E2okyHdI)xdvpJG<52s|B}a2Zyepou5% zaPE>~MB!g^2Qt@tRdK5#%QhU|gpqi30)9Kz>G|eMGd+&C?-6aIZWjnM7{}98Veh|| zGk#~acPce-rO09$md(H}?s}=eT*}*3XFM=M+H}(^hSvzMC+QKLso8EZ54Tpjj;}=t z&!$ZdrypR*aH7dORG;8o7}1R2GVetcV{Hw~ih98))nuRX)_0@~A_{+g(trEdr})p0 z7I#P?imKd~dC*}32P$O+mInqYUB(+P9|*2sdtKTHdajN$*>~>o)M+&?{s~UE zIM4Z=LlevtU}Kn%ZRR~oabp{qo_bBQ&xAtEOsp+Y6=H|<`8}gaKG`mZ*@Y5i=O@EX z`TVlw{i)SiJ*YK?hvPte(O5rb|Jv>|&@RR?d`*?ZrX^!wJEO{R7WQl5eHhN$ha2HN zpj4lM0R89S?3it^+95;D5H}+UogAK5Gknl{CU1Zc7U7}Qazj-7BTdr?yQFvEH>S=P_=Uv!#OD)&BDQPaYp42@VL{0~Vo$#Er1vG^-HoCW@y$KWa&d7RZy2%go*N86m zhBzl+B7_B3)oE|7t+3*_*wM-${l^B0zFf^$4W2<3OYJV>f$~sai?1j6Mb#-#3cqWD zLgE*IFyCxu)#z~s^437hdx*2E?gY4D!8jt-Y@v@=^BMWOBWf8bv|cMTg7=5MJFatK z@GNK0sVpH}pO>(zgRXHHMH7*!JdskmPYIPg1nSd&7BK;yiRBsXv7`s_kSl%xX6e;QDL+1^` z#&6$4AKeiKYNeGIZ9bP*bnej`giT?o?tC@dO93X`JrHAS#w_Hp#&1o|&dy&>@TVOcy2z+uxxmf6wk+5UZ^i6)dq&nS^#fHXnltVb)3_FVG z9D`Vt_z?cby{o){KD&@jn=s~^uAnz#T1iiHGyjXPWu=!lHV^Q9 zy8@4pm5ich7x=4FF1rhtt#?2uK zFZAB7A61rzrLjBt?iX6IriZ)v9(n>ix;ekeozEf4j2oul7xS|>jBFKH29`_1VTJE! zM4aW}O|L-J?x`b*K@&G0&9qC^cESq1lq<`Ie++OC)(dc7F-j-q{BNKCD80L+Y<=S1 zUi*)sn1P*FuX06Zy>H^LveDi9s57_qy@7lAZ}l)xCU42YAV$@BC%-w~mJnUOWAeKFx&!ddGk|aY&4Jlqi52NF!vs6c2bba=(bVM7-aEQ2g}AY~ z`Y#_}qIO5~A_DeYr13Q>MP{5vxgt{%rRnM?NgSO~zZWT`7sJ$TZ*5YI0__SXM4G z`nw^S`(s6n<=kn6ozA36brSU%-+8ZF2a!v+Gq9ezRU0`3~r{kR(Kx0hRwSaWh8 z*u|lpQ;q$>Cw_LdQlj*c>tO01<=)&~KP!gaC;GabV{CY*lYtW8h)}&95a{0RFjPD! 
z|3#zc0mz|zx!{&(n2&LHMz0UbTK0oLr{C5~P&quY|H(J|_V3lJ&X7=g994H0#}2MO zxDJk=zSJ5d>O(|W?1X%ZzUYJO0FO>iP`HPzHK38L*l_`%(Lpu!J}&UtVkQGaK>F@j z^%4PaTArW8u!jMqezf0DeFMvH=wG|7rnp~#86yj$fyr){zvt4INhYeEiaW@bmPk)m zjf9cKu<_u=>i=}u7FvxJjDG;m$yda-Gzp|`su}hnXO8^5I583W*REHS+C)EUtE1oD z!$;&=i@K{eAlp!a49T`?In_84$|Pb_12BthBai}U1J^HW34B#M2t*(%44Qu=G9#=N zxC~>;)bI&~7{8u_P+4-Z0JB1!%s=})JUG*AwA>5{ez{)}igJHS@`UPpfPlcPY^vMs z&K7p&NbZM)PG){(>vMn+$7f`AePl3bZS)50uaO|RMn>+~xWIfk3GXQ4^%`0P zQ7f5wm66ZE1reW+9{bblL=K6-)RA)KcfuacOsw-SbI&YiDJ!a=NMQ%+a!I2N)F4;b z2h_C%qcWkj;J@k#>^=0SM2k7p!#L!|$1)91TgIf#wK?ex?*g|Gko9M%-(|ZXchv?f zG8%y@icvra`hq;0GTa7ZC1<`38*>3O{@QU`{Yi7G2o=dr%JXTKO&eq6W?I)s-#zEv2C50`r!WPAWl&ZZFlvq&3m_OZ@#sYVXU*Qt#P7`G6NIos7L6q#v?Xl)W)SA;ZKu$qo`D znU^z)ELuG@%x^UF4zwA;K3 zi4_?mvDn#S)s8jB{V+}_skJ`Xb~_5SnfvT;t6`}Ac`(9e+1sBgH)*zt2j)N!X(v9= z$_06CTcx`R(@cD7Jo!TpA3GSH>TTY>jPVuO_hAZb`8FSi+t7}Ie?2RDUZKB{yUwoTQ8Z+jWHAoG}l$;!#3 zkg^Y<^yKHOz35D?Y$bPy-OJ~U_8x+i-<@4h z#8Pm}QZ|+5`zkxeK=m=QZ{3(AR!j6_7B8DTZlu<1|Kd4_ktGZyw}N8?B!^&;f7P6HHML%~T6L|HZ}&B~0Fo6TszhPT^?Ljfi7 zyR$#O+*@eNUGfJ`cIM5KvD!^RM+gbP`Wl)eok0qWj5$Ts^t3h!Enf%Bs9M94$6kzd zX>I#6rlV2)2{Y4g{t2ver)kKc2VPEET}Ci%Ge9Z!xteLmPLA!7qSJcSvo}Eers9?2 zkKs;zk=J*bXHHx1r;TC%z^00Sd_*VfxhLULai@w<#)BGA5P&cWNXp<$zz7q(krEk^ zkiWF9aKO&Ko|j$0bpz$`mG|ci*|eNIEwItlyVs+BJnCkpZYE9OIl9J2$(XIVz!0AR zmAW{IwcG(r-u>mJ9v&^tvT!GG`FB}5=>_gAh)vFRfZik0XT+NS*9Mn8HO9S2UtVv4 zdtN~>BCv>MWt2lL!BNpt>I`qqpq-={9VqO#TDkg19W;@;**S}0QB;eny_-8#lvMB0a@GvHAEOfhSCQdyX4Ru*H?BTa-VGLN9LKcbLK zP^4cu5HY63$Oe&|lf{9E;S@Q_gl_RH&;0T@jEEU{OfGvqYY^HY{RB;NAbBTa*dPzg ze$nw`$~YI)_fXpYYDWsZTYIzBfXE7d`aU({??-c|WY7=3zlV z*{AgIX7-P#;XctrH_tCY#T~Ilh~^q^nmj=&Stuc{r?*P`!|*Ae@!jfcKeb%$uAqeY z8ktGu3=f=0P($dcgHD28rQUUEs7Y1+_2E0ac#p!*&&avr7^JD^@s~)pR%ltqWB$S_ zY4xFj$F2t-H<$FFfyg_9J#k)T^R3Ga-KOWolt_;S=O$#)y7CQx0CiWBv7wZ8Go{$4 z5Pe#XJq*LPfW+6lk^gI-DmdFt(bhshF6W?>tJsLCzW&v|R#r?H50#v!d z-qkk#XjFm)qs(%jX9ht$)3cdpXNX1!A*h(=Mn2Ta7KHG(-7t{bz6p3Re>~m!<4RSD zjrTQoWQ;=10{4l6`=v+oKItL(_X+<3;CCtkj#NepzHU>zCfB~P_1woOeD+=koX`>xhb?0TuodD8t*~-Fs4+W-!Mcxw2 zqlonBh7ABiu@wN;dmIkE;WcD8j%m;IHj*zNF#;fSD(mXc@R4!{Y`0$4>1%UW8QoCD zbh1bf8YgOZO4=AEnbvjFRRXGid1JtR;-kMSRwF_B z@p1V-5-*bjcC*cgAy-&k_NEzS2(@ znzzSWa2}sbl?NH7&qBWjcwknS%=f%72ES7I;cG$r=O7EX{<>lr+}bsOKhzrVhZ0nP zm&~e=u=P=8-CiDZkN9+PA~;Db%|ObsOs(;0L}+NA)lZG}973?YfbFB2AGEDE5Z{?R zpr{3U(C-xOVNJHuL(#Ojw3eM7Ad~$ipDzu;{82gEvhtt&9ee}``+9?trQeAdcZbh(Eau|fc}!nc zw?aHB@LZRPIbQ@&BZ}{N^>#T_@*F3`doKlu(>!k%vnW8&A+c4KS!Xf3`C{8aISWu8 z3hqd$+g2E$9)|HfXH{ogFTQBoa}4a=v&I&wAC6Z*0wLL?O6obgQ7h_}N_0#l3&B^F zIw@#N*0YfDe_GlO5hB!CZ9+SP&}+aPk5i+}X2DdGhxy0Rf9a;qDLu<_KB zLMdyEC{mV0_AyNq5<1zFD51!fHQUINvW@Kf6vw^{#u#SI%ySPZ#P|I5yq@Rv`~5e@ zXFm6K-PeA-ugJFB{{kJmey2A(M;SKW9eS`rr(j%PKS~CN*XEn$!8QV?ijL{Nj0fw` zn;4N;ac|DC2loq$Jz^|YvMf`k$@gRrOhzu>BjT_w)AiwNt<=a?B1eN6o$eJ}MXt$R@V^lN-}yd-Zed ziCj+Qkhjh0PYaEDVlx58032LAkG~Tx?%X(aVvq2Jl~MRTg4?fAbxuO+w%Y@7;)9@x4vX-l8~X$d)qD?8sD8RvcDUC+gBBM3L6 zry)pCUQo;7&94BNjj)D_h6SWg%J)anB@-PQsKe*ZAxInyM24e6fEY$<8(xFtWqr0l zrIkvR$WHI@#0l?kLN>KdKbI((X~$bW1&? 
zz_SY`8Ds{01raA+x+vbCw?jGi1#Ol-4@dm?^zHbHyLulU0524qN)wCEAdwGv%Vp#$ zl(^>=xg*1eRV%xdgFzZ{jhgE#WI$>*(~#g(#n}xo!>SN~P=X`%{V=ao)*vw7FPsg2 zXg}@Aj&Ha1tr$7_Z-_0PYwF0D9+xrdnp0UF4moz+MjygM=vlv8|lkhnc6pHSBA`*_Yqq_-uol=?b5= zRoXU;x|88#-a(0??qCobR4|_*xlBkiJyDhbq`Hm8&<>!ghWV0tR zMnH?$3C{4srdyw@ch&w(5nRqaB)PnIf0#IDF){gJhNGkW!%WE|H>5^Lf#M=2B>7R;X}%sL1^6sS_u9!H zWTc`#Aj`1B%S1t{{m&H3W$Fvanl4BmA>3`{XbITSrYuR2={=~-qq}XCFgz&%teRR5 zUG^SS?#>GQ^{?a)<}yJ6_;Qv$PDlZ8jTmv@!Ip_{0QrNsT(S8wwB1*$1rv^fjN_p% zHyK$x%g1tnaCDdT%)*ARN@CjzeoUKi;oD`?os=yu^uMUtLFpy9lu-8lrGG2${lE8i zZZz36$LZ?p`WbrkC21EGYmTb#sK#Iha$j-QLZTy_Ls|Y(V38oJu(( ztyS$JKcww2NJ2TFlXGi5@*%S_BV4(BtL=7uc|}bVbKJwR-^4i!na_7fci*nA{#1qJ z2t@e^CH;^rzW>~hI;KZJ(Voi;H7mm-37?-4DP8$e_VVq#VHo9ZiXddxX(qbT;np+> zWVw;F(iolZBtd}{mL=CD6x3E)!jJz^Uaol!&9cspCoJM4i9F_dLwOza)xReNi>uv1 zpwLj}=?F=<6sp`V)3Z59ai-JNcU>Xq)4?IxO`zBg^Vu8Gx_ncJTDT)-mC#W)ryI2s zSvz^of~2g+A*PIT*a&XVE={4U04HM=d9e=toTU^w8tVIfe5P4?>>lraGxIFBL#fyl zQ%a5?^NcM`1mAUPugvMHeNFx0Gig(f87cWT9m8@dybziD0qoC9nKez$lTYnbcp1-G zY~DGS=|XOg6ddlk-NrWizQx0zh&p7bQ%*0FPOt zmM3b%Y>SldGfYjzwYfoj7%yy#YjX=o?ZsJ|FksL74gfWH3#CdFAeVzGus;K7K};sv zmLMo&5hHA-TMqYj&KB+}M zRLGUw(mpHe)6DT6q)v=YDB$q&USmKD^KDKS?Pd7!BX0}|1Yv~~s&eI%n=KhoS!K4a zhu=VTY8WUBVGlrw{!n(hss8GdCc zAqjgKf2CzVs?~0g&QtytQ z!19~ad$#PPs}<{A+<7a%0)+m1LHQ|!WPSSQlgi^SVMYJowsa?+P}fiYm7`~gKAO3R z1;8(E2WYA4YrM#3KWuPkp7&X~FQM)ckLQFVP!Wg(sWy2|bHv9H|S-pV&%7>6Rtl?$8wxw+icB;eDg4-Wv=!%^4N`-LAAW3H{-xAhRY?cPtx4cWf2 zrM~=`2ydu?-BT!*1Fn3ZFGb*{KWReN%&Om5zUG~(o{j4_V5K&DZOoF(`9!G{+E>cV zJ(;j6bi*t@$LennFGMp?x(1nxT0pHG+8HDP^~*!#@(S;}o4eQing8@Q8@_F_+12(q zt@D%sj3j;yOYf?DfhCc+l7w4WQ9M2C#L)TZn`g4Iv+yk4Z#BQ@~BNFy=3?S07!@^M(&cGd9 zPTTiy3e?V`$<_J%?BuYUWpeg;tjy&Ag|pFoO&2RvC;Ck%?Yc00#RyrWmcp7oC}bxC z)Z@vevtW=ge?XD4$@K|DT%qHMPS^IKj0Bxk(O*KDWcN>=mN3YJex2Tr!lVN{@FB5K z;y7joBtT!=FGC1fCOYr0m2&H+lK(ah zcr`zJ#1DcjQ5~RK`lW$_ta)91^&d0m~~8Ys3P3O;cHq|dC8I)5V*N1DuT7PiXYBXs9YhtC) zp8qC#6cXb(rD|1PhXSi?I17Fe{?CrupM?o4#QefLiXIgm-cS9kH%~n9UJzxG+k0RU zjBoW>O@U6qB*!#HO_d)YjR5Hpx~BiFNy^^7L8*O69Xp1x_T5z|zF`?!g${b;lRqua>pXU}5Kq^+Y< zzRmEn0*3-)6%DFIPspEo1f!vrroer(`u>UQfgIVNWoRmZze%$P+;>m+5n26daCN5u zMb@WZVU*V%J;MJM$HTRr`sks~n$KG@qUE*@jF|sr6_8W=?0(@T#IrqX<$LRc-pV%B zn!^mZwA_Wx=y@2fRWqIE_rVPKgQ6lcg@9ZKna>;z^iQrjLe0q*Qs>~X3x>a78h>r_ z;)ixdoyS6f{#NcBeAYChzV;DHVwh3ev-r@`4{ySqHn?3;LUz`-kjtW3G7=2hylQKjDpNHn!dFWj`U~vVUQ}GoyZrv6yIXaM8t7 zY8c#bBU1&Vv|DI=V-o|f^cTr4`5>-@)=!eU)1)x|&~zlqfT1%Lg)vnyny;5n_1f-T z<7e>bLtq#UHFZ#21HZ>qu?sw(NGNF3TH|?f5r&4i`^YG`5=A^4E9OhpXCB)A5o=i_ z_-J$Re(Ys+@H{+|DNcq_|2mwPkiGgU;Kt7xw0c~>jp4(U*rH|%s*9n7(9+RW7Y*(e zx)A&TX)D9Pd}!Pm7RRWAEx^a~{h_ZeyZ~*%kexpf(z4*jdmfO79pmhAI0KdJtC<_& z)@u~OfawlgG)^ujyqJ$OaB_P`D^$=YQ5szYLuWV?!?-I;PGiZ1FZyeXno-@culjA^ zmjG%}t@{b+sa}Q&%Kd{3e{#M!HJ~@$*bw&-yt3sM$if2E%?4DfI}B#I>KM@ou*Vpa z8U~+1A7=%H++EyjRR4gBpai(basTI1V26w$Oj*akiD(pY!*{O!ZlKw0yrs80gL0bu zckk8tlBMe1MEaOjr3E%?;u?T?G#^!8V_x@*#(_jHS%4NiDmYF;w4Ngsn4e-)nqNYQ zM6UE7EzTeo^hpoBt9y^p9j}Ko=f<@uM(KXO>5%oB1Kk8v5VIhljvf-PU{Jxwl)!Ck zQ9&J0+*Icah}NMewme}}We1@4b*{N-Jdq&n#Xjp6&>!J~8z1dvT*g_>I*T<{a{6@Cm!2p7c+8`KKwl>^DW8K*Stc&E{Vm z_U9F?07Z0S_?DsFYaGy`4)a9G`1^b0_btlx?3t_R%!`lBPXav&h_X^H%VlLGbJ-NZ zYIZWvQBv1`W8}Q9{NK`V4yOR|mv5SAzVyq*A=RG3k0~^bQsq{1g)wg00x7C_fpKI%pBGwC79@UTs$+dG0!P2 z`LR{2!HV8&&cMDwaeQdi^m~IN+wrj})md5J%K~Y7SyxIIQ6Z!CA++16`aMajssm5-jLQpMDtW~s z6JvW-Me-~c=f`px0D>W2eQLV0+70+DcBdDY03g5J9UK=GceVH6kWIXlN>(GL!ya&9 zN1Q?gt=LZTh*`t$q~AuME9lHuzW%xGKsgD!05iLjy@GHpXk$EY9@T8W|soiXajZ?hXdSz5VJnZ+caeD3*&G z&Z73Ps7Q}e#I3&3_m8l}0o3vTE5|>42g(537<0ezrkAgt1Jx!)Axg5y56Bx@60ZAB&Au@6*cit zU)Hq))~sH^V*#oQ7bX=1p(WY`JGpd%JLV<_Ao6?tCF3=aT5E?@ax 
z_fh{bbRo|x$gl`C5sdUC{*hV^s2A<7<}awpwO z!p8>$ik^}VRELIhUP|UjkEw#Fa*JZ417I8CX_$|F6+7}JWjp+0*4mnRMXc10OYOFfj$pC&gQS^&kEoyY%em|-S#`KxIo7C5c56#$~qtC6FiN(nGU=~oXmL9ZAcvL zolBocwodnHH?-7YKnJi46{HmE_arl}?%_lrX;#}rUr;wXG*RyahG9D(?s!(9V5Cvi zLV?n0^f=w82iXZ~z&7u~=^(CbpfU&oJqS3DUHu>Ukb=9MURfVRvU*(!WvDu@7}+65 zBQBzP8;Sn?TqWeNi~I1&fKD#*cj-VShvhRam3?bwfI_KZiaMqo|7Kr)?cuXbFK@pP zLc~vbGsxYAtK}o+r3vd?I=1SKuX$5V=Hpa_;;&n*s*?zvgUo`s}r1r_(Y8R?CFl1bDv^qp0 z(H;NEMbaBJ@vqur_y?^co2G5^r6QiI_LbwqXTrJL9Pg&vd;o=2TMLht);$a+HI{pb zq-{Ps*Lrb&8Ni?bN{X|61|wGSn`)1W-1$IrlRcS-soNwwZ(6*QiBTvn4-O|}X3TVx zubG+3#H#i`?%xI5DX3|Lo{_vec(p14p{^Zt7=Ln@p~+P6r#eKwIX0!TqGlG(%B0?a zy86IqU_RM`O%)XJCAz!Fh+N0N2f+Q|BBz9Ph(W3hnw`-5ruX1*Hi#mZk5t; zpy+<-LET zas}P5hH&etK&>r$!&v(tz|coU#;;S)$R2mKirs2I-#|Tg%NZU$w!cC}8=m8n14ypT z=|%36_}MXc9c30a#P{y-YS{H-s2c*K9SECQNr}0TgazH-(g=PI@Q{)cs+ zp=`KySgVZ`^^4~fhG+RMYpF2hqtI&&vmK$guUFzC`Z3b&S2;|czO1oUwP-#;%s^@& zpH>`iavs#@-`N z1$GL_k9W;l@SV2;R1A-0+GLbzbPn+j?^Cmkofl=W3di+Y_RURa1W`ppY{Yo2tTzw& z9##0M2-Y9nj(^ErqcJ>SAIY?>-&9 z2dblfvkPzrse*tgM6Nlm@b%Q@fip!V+e(R4jp*B5WFjhuMCqmD7)>S?SH<{Xd<^K? zfq(esIk`^)7IgNzTaSr;RhKFTWoA=JYdQJB{uEMZ%djs#31Rn)cln#{7oA~oIz&Cg z=D}!@x`bJvwb6=p&2IvU{aT&`XPNqwxH+D`sXpWYLT5Ej!@RN!rUQ#Gf{_v|KU2p- zOr@60FH9J1s1rXmOrO3R;SYOOhlCd7kAk9`UXWZJ>gq;hWF`KdMcie^QMUKls38nb zDs0hIXI5xlxZIDpVHJIJJ9~#yfx~FARTUeWtESxg%EXtsgT( zYhvw(z6Kq*ui*NDAj6Q;(T~9nAN9{&Jidhyi!QT_ytTsfG$`{xVYYLZ^!3xLkf2); z^cwXtTlz0$E>8dZmF)Ok>BZ7VIU1nu#=Yh-cDxMgX$2d913bK_dCqSdut|XISB`(I z`onSPy2N{owb`d*Gm%e6em%wGTXZ5t^$6X00QbgzqAaZ=tM_2`++CFvS6r`QKu2I* z41HoUO9RZ2hk;>uwQg*M!*|mzU9q?L*osr{16-1yw+0_b#Tn)ut8O-P{n#P%dox|Q zW@w6`8)m|;fhJ689W(wx)19f4G@>mtu|35yk_c^Qu#~LR(NRwe3puyiYb-ek#tHJD z2+g8Dv`SG`4)Ko{sKm0Jsk?d*HEUvbWbUo8qm?qDfGC8?ju*-=ap%|JzK~qR+3IoR zV)uH^pJf0%0Yx6D4l-?)Wk?_l)q_IK(zc|i58BX`R3u_RrOkaf=SDK?VB$aw2v;m6 zIOEq$+o}x%3vRpbN8Xzle^l9N(=aaSM>yiH{swAepU}e;X9Ogl({@2V6}`8kDtYF~ zmI`l<5qawuXi5AB#9Sk8iU>1c9i!*5Bdjfj<)G% zTQ7JC5)R&(K?j*^LQX`EOXSfC0~|uVdk5GHBx7&e=;_6|x7P%x!XD6BNHH};m8$On z;d9Ow2p3qEujCwo(Cyg3OB&Rk%qpv+P6TF;)_n}}SIDm`BqrmvZFmZ%M}@S%O~37# zB3*L%_a(8t-Uu-@`aQy6*e6S#Fi=s#LCq2wszh9QN;)yNyJwvG9uxmTYFbM+Q><+l zyR~Q3>F5za{vQHTX}^pX#&*LkHg2mLt2ArGtHuwT*QW`m0%oIajA;ATly;HjxK5z1 zLX^K~l%=Q->gM!wdty~(otu@zu57J^h7)h|0hFECqbUw+<(AID&Z%s=_3%d-Bfi{z z)dA(m&SLV(L&e{%1^w^I@%^yN=BI=pj95wSIE$$4(JR?09TII^u!0aL<7nzD^jpa` z9@0o`;Yn$2;VKP=5XSC?5bJq!e zqLSVGcih}usz$L9;jJzv_f+es>xgsn&uaO#!WC z5~hg#;#QCeBtosV5}y~$G8`bxtD(`XP8oAl{0XycdHQ=pVA3`F zsyMMtX&nD5wY&&rc?wFgg;b-JbM~&c=5z1wFLkJjvtf@gMWl^Q^PlqP4Cp78;nn}P zJEi%Qxp(&Cw=DAyE;Y2nx7#bzv& z!j4!OMS4Ap45`GLb4P(h|Dwq@1$>`9#6ajN3fFTj@bIjof`ZM|bMLu4aQ~fht!SBM zjm*y%RF6meptrQIU~!j;Pi>AaEkz>zq3$`uI zcU83!6ZtJr>d{RdqxZ^WW))e>K%d(z^-UKM5}E-HXHYt-T&c>am>LANITY(R+#O$l znSIPWOZ-+i-87E`5IWq#DrW@&t%sCwc&{UDB`~M+PkuJr z4r~5_?2t%583NH^2|CNK9qEmyidniEx|DQ$%Fr}$zH!Ete&9VlQ5B~S)DH{T^`8=# z=K)+o*MI21jNdwGx=-u($O-|yXaivhA-4uWv+VxS$GFb6CO#KZT~^qYF1~zalnF}p z;7c-Np=sQhlXI*?}&rF zTGgX0$2lQ4c6c|-lxh?vl-3i2@h)ir6VWPFg(zyrR8sj`hxkbI2Dv_s_Q$+IwI*H?pPH;-3oP>ze^6MG1%1XK6FPEeV!3%Tqwg>vd!DW_|YsvF&UTuh2qLY^W+aBBqqP$ zI5q@7-(?@iW|ZsT&C2({xdN?uK$ga0v^8OF4)gVv@UI)&%&%l)-vq#?p>u>x{F$g- zAuM5p*p}=M7e?{OfUGkop|Fyp5)lfgXWpi3MG3fJp}O8h3L*Icw*e%x6wrKaEdOO& zdgj1Lw*X-v?!A?R6TR`;yj=Bnxvna-vK`TJB*UXUK|ViD3S}zgO1q<~2}Nx>@Rpv} zZatl2;Vi@NmW-}-ccV?qOy3W zLOsnayo7^r=Y7U?NmVzlX6}%tdSXrJo*kd%B-Y$?Dz|7N4 zasl2phm3$5X;zU6+3gOXMhjBUh<#*n)Eg~dM(RcgbAPgWe|@aI{cPz_XAiYw!k_Gx zMIBlXWeHN_@47;F%tP7lU)hfIeBT@Uw;+85 z&91WBaow{$=suxP;O5Vt%cG?Hxrr4|H>)3Ed(6_(WQPSY%8S_boEK~1+<(+^4X2M`LYlzL5GVFW1swS9jtw#pE9 
z$(1QcA_915HlnTnMph+@6>fXInQOrv#zf}y-rZEz`hB))#6-T`kt_Ss&{P+3?#Xnm zEDqC!pBC(v-pQ5HoMjS*F>-xeUL1#Hot<;aNY^!Q;nd;xCbXqk;cA9&p<4KHh&f)c zzzISWUCQ9Id%+HNtzD{l7bp|W1u2=ZvfBh~bvT)qZ`+pu$_3*#U>VgbJZM{C4SznI z3d~Ubx18LGYJ<9l=U|`r-$@kV@{XqY5nz!8UWNCr_{69lss}2Xh;?N9g17bS4)5rb zkUkbyxdO2Q)GBQVDrst<1wbwj%jDw+d@;Z0oh(1qv_)Um+|&i zUAX+0MyUQzPOd0Zxh|rS@Tg>ck8WNkj^@|RDHIix?1*}wZZR?96!yv~z)g?REmMY_ zNG6-e5U<k|o%a1w65;SNHC}+pyQ_aVriah&wx@CP%F?J>+~$H7UoO zYLy~3?S*jlAr)~$J-u6pzJ<st{EyOJZNU zd5v-;3aPAuA*YB17>1Q@*dWp8ytkwP>DI%^YO79B+~F)oJt&^UaBlI+j4rycej0TK zf2E42C+O!WROK4N|uh?K+Z2@vGe_LXC&Zz7P9GH!xgsS<~$oC$GNe~D2eMeSU=6Yf`6O2 z@Qq1UwpOl`N#^x!!+xXPB^ySOA}{H5T3#jlu3aS9UF_qFOax~i_j^MI6GpbyQ<(); z076U?!A8q#48@6k{+FiFXOM)6p-~ia9S+i6?)5dZ3>b)51uGiI?dz=FvYg$)jjuw| zI!N~4Th*D?3k~bR<4+hLwIG_3sT$P7`_1x$`JKB>?GsGvBEFHI@DY8b4KHQ_dM$ zsZ^O&t~Nt#<3v(VH;u9M$OYfD$vG?EsnSK8wo5LEC2&fZeG8rL9hQrXB*{f_;s#%w z%(}R@CwJhsJD&JnuCoMi?gN^`nXls2BW0U!9#8H7IQc1|Cs&4W2L9$Z!_gXJM-YU~ zz1-QDcc~|wp&AXS7lnm0osEyq-l1n%rDRu;61>^3aCkFcZVN4diRXzG>^InkV8m3> zU%6(g#Pltlcmy(x#QClbf83dL%>_nsyOI?&m)UALuDeNI#j`9e$c3+?PqjW}pP`;q z0Z&IiMUGmdNHyW*urHVIHZMq(8LBkD-=982xP9Qg{|*YlG>zA)rk%I_TYUF>QulD{jCMhV*Tnai(5WKGjm2WOWg*M6C|3#8D?JdnC@azs6rIHrpRTMC`*=H)LN* zq#J5{-wi zKT!uCueMh|nhME-jyb(u9yDONe45{^2HR+6Kz50Q?|Git1)ZMG<;fee zKpx1wYgq|eYGYX5eZ zIG7tgCBnbdGdiyBf?GP|f`Je0vmGssTyjJB`FHgOu%^``U@p$$$taK!_Wu4C z=syko{i74je|D`x@84pnYA13+R4PP2%WBCyG_Al+WR@csrA*5=g6Wa4cxZLpQY@9j z1q+e$ke8PiIQ~+~0D(&W?W$eB11ex*0|#q0&LQyHKGY?%{9~X6W8yP}3LtEn`&Q`L zD<;KwHkuJM74lo{yqE2laSjk~KU@-4+}uCMw+GEj9Aj;lIM7!+XRts<|GujlnajL# z%lBaZ<>+KEaPskOe~z^L8}tugQF;x(lUgC9{+LKD=y8M#{=E9Emb^1nJK89Poe`k^ z(Pjp~ZdRGwvUj`(@fn0hxx{~sFMb9%x3)`MS*x1h*S0qS(=8>whXDof>$}xH@Vo{D z?j4q}7Bc;>Uk?N3*<5eSz)AS^-D)4$E`p;uqju##kA?^s%<$mJ|4J2OdIY*dK@Y?w ztTy|lH>Jo0BJVYosc+?6@Oa-!^W@JTH*pWhyg^|0DF1oimOl(X!Svz$*aq{_4G7(> z`UR_Ixh&n360q~^*h?2y+ngm70yj1UN11zS+p_4{(ii=F0s05nc`x&K^C^o<7rgS> z)MRj!hJ33rgr(zxlvc3wCFz$zEa`vGQ5W1YjA($z|9%R@(VQ69TQA{%^wwRt0PjK0 zAtJQ=@BbK=U;GrfBXyym=J}@PWC@nbD0r% zQW&_fkj(r)$A8nHaSr<{d2dZV)WVQ+JDo(A1=m-b2Ne{iQNG;;2y6dy*zNznf8k{# z;Ln&(A-dMvxc+~iAkfMcfL)G!AoH_@&f3?I4fM5yUaqme{p@wb3o*Fk`& zre4)eIi6PYUu1^9uz6}{oz0}S=zm_rM&Ntaah)U1r)I5kjF&GNw+9E0WU24u*Sv@x z)sZ^dM)*3C_0;`X`Psx(o}WJuG{@$BE_vB={!eupL8jl+A|Y#wzt|66sJ(yv0te`> z?R0ZZ$`ap%ZqBcMUne1+2QrLy@_)zYvGRCk9b_47hg+_BX?nSJb6)*pO+Ar5SP82G zVTm?K+AZ&3#e;Y`;FQjn_;z3|f-+_Uu5_qNM6^3wgZ1C{i?~>>-cwY`Mt;HB8o)6> zL>^A<-66APQ~JY+MFwceX_)TLV)Y$rTdn|xNT7yE-X)I?n zF@&L@!U^s^DgTWf7eI2yJNWzUciU~6H%AKo`np{2m@%bX8Lvgqcc}p^jCUaid0!rp|X+(EgbqFhxdJ<5DbRcJUj@a+oTVA z1vJIQPw1;KYKFV7@WSXCA)xN0Vv1#IUtRO9OqZBzcjC=&W(KXbLY~VY?uDUaL+bw# ztA1nSX8n1`jK}F*e~|RYa^@szn0a`xwq3*uw&Od8PMh61ShvI5wa6GO$QHtJzf8ju z;OX40t|I&F-M{sKrU*Jrg8n@FgeO5uVdOOV2ozqu=8aJjOWn~FZ$GB|Rc=@rJGu>2 zDYG(WkxotDOAzofq32Rfj#kMu-NgPfnlyvI{c)+@;Jy+Y2YP%BJ+T3OOviUr{L8iWB0MxV%4=cdVu4&I$7BsoUdwq?wY6~oG= z5q(loAtVVi^eA{Yp(M}aIm?^B-jGE8RM!^2QF{fC|E_>I3PXs(a8kqWo0)Y3m5)Bf z`rAAj*8^bIy*=r>Ck1Zk%&J08yCD9+i~hwD;OChhtM=mNx+)_pLd&t{xN^K3t~cH7 z0LWp!<~I8|gH3)%UFfx4#5H*|f2u#cDmP#UzCkpkTaCgpilw~`sfkpB%pG`!-6^KY z)@^#wZSJ^vnR$9oPuOYWI-oHGN*!ohTUX%;gewpftVtE^`Wxn2La|vkbmW-wkw|$x zU#(pb0q!q~Gm&E0kQnR((lqYD%s=ety=;0wQM#_#xVAD`aiG>}lY1A&tq*Cd!33+4X`vvYqbzjje?v=9FNwl9c}bmXOU=Y%Y%fVTN74YJ9_D zg40u7a-!!P({$n|WL6NZj9cgQ+^O#Yc1+|~nlGO*Qb5jF6sQyLlboy;DiiPegs0|6r z*pc4oE683)Kytk77i4cTm8clZ`Hxa#wulPiV+r ztARH%X`8*O#3o3MkyVY;A(9bEMa0$b;}Urbw*gz!^@7ntcF&mCXosL=a? 
zg!j7tcJmr6_}2;5`T}1xw_yM3GUWH0|KE6F74f2SL4aN4V$I?rbIU2OAlAvm!dtuz}>S$ZaL8x-Ps7ThvgbJ_usY~ ztnE0Vn51=MY+1U z7wlywVu!If!?~OS&?3N(VY9!`V3{!pfE>2f7#6&{F%(hRhuvn=I(KVuSY$b;2x9}A zbp45k_OcfOZKSW>k{teVn0ah)xPRL=x1=pAT*&CJz&h9XqhIpnGsg6)t_;ogMn@a= zs*&~v;kiPa0mur%HvS8&^qK*t1zy+877oEh-$N+j2?q^;PeV$oGZ3+nCTJgva zR$$_=ZQh|wz}R7IB=hQG!Q<`XN-Mzs$NN(N_?8RsK?9sdep6GDByWn|gH7<+me zs41K!Z{~3eek~fLt2AS?YvKa#(5~p6OIf~FhAs^XH*D01^A&r)Habp?|MaY4dUJ9X zxpst%Pe90jn=k!YGNkbkQ(s(i*`CGHq@Vv%Ieh2l*z*imGv((R=|@6}mIeqJkD%`Y z*I}hhvdeNAsKrPn!tct`4_ryM?o6PFI3UDs#rMl^1$cAOLBIqeitLX3@tJRHim;kx ze7GgQNHPB9Imdk;rRD9(KGiyLk9;^mMwdLr<^oh{C9AGxz#ed|h1OG%6o%Y`EC zI+p43jU3*0Xj)$Qcj{h39re2VeM&Jj^qRQ&R=(A{sZXss=D+WP>Ehj=@)o=B|+#;|2O8UxRkN+VVPyr z6{HBfu#Noy#$WQFdN8;T;UkZZT%-%ELRv7hBH)CedTzg(Vc zjU%=BQy6eRzt04n1TLdZWP#k0scHVbK7BywP#KbP0< z@Fs$EHOj#}D*J;QtyH=WHrqu1mc|O3i-nEmV36kT$0pLsVWF=PQTekT)rFZ+roSqz z=Er>mAf}-HG5SmAUK?1{*>x5dsKu%N_&e#9md0O_k%v$j_zGG_7jooN@>XimJy@uM z+d#o!E-xkbrQ{&o z%LTV1qivp(UniAtKB&MZkIQMvO!s})b=OitB%WA8D30D_-h_P14@;S$#D!`zIbmrA5s4di1G@vm+m=P>fop@K`v}ux$VBaDWotLw8Br1ULfa#tQpx9@BFO~NP59OuSRFM@optb> zvWdhTsctkQ7;2FyHeJ(V`fYRX1W0e>w&5ZUqy`A~(;WE?kqfd=oUURC^-RYj3k=31%;)*KDaB@rYy^WxpqM{n~I(qj{ zovGNV#(dE(PE`Hau$QbB4AXo|072eP>XOfnK{{4CxH0+) z5&D;4H3ryMx*FfCcWo6XV^W2PFw+Ch1fioIl&ziGBuN6Va#-6Wh=Ro7+=^T*Bp%N>#4VAF*( zfaJZbjf^=XX$6Jxhns?G0;`7>S(|XTui<>Jt=XkAB;3B zBx<5Za?(Lo?Fra%ZCZ|2Ssw9eiorq$1*8jYh`Do$loK3nsF4Zm33laz;c8Iy(K0|{ zM-6J|XbcKY`70oY#^tMTzeXa+vl{Oe!k%V})f|~VV%FX-Ibb?X!MK&c^b5>$&cdPn zrBvT-7ZD0tbX6N_X@cR^eeA6Nj!hI5iwkS-?FBH7ziI9>z;=Xg_+YTeU;wAlv_i{$ zxbcb;GR#`~Y(RqI6hpe7qEmj@|7@yzNfpih`fT_G9Y*2XaYO_S^~hUOMr^`=mL)dk z%uU|Plzu_Tz??2qJ%Ai5H`SX7JMWH~9#=mVeUUAmFl>6B(tSdx5{F8kg~J;PIB}tK z*F0~UPQA;y3(vrhnX<=$ix|v@b$<+~&3lJZ*4ye3WqthKv4Hma8NdW{%jC``loXJh=q__^(D0Iz?Ohnq=85^3i zr(TxD<(wS9ATR2-Y6cCQ83#sQIw~Zc)RV zu*@5BY64U2QM#w;@_IPji-%;68f9^4`7%T{wL~~T8LQ8Y{6WNU^& z<0ew%Hng>5LUt~ZU?Ei|JOe88cyo?qSL05OX9hZ4Qvl%fJv`}$nUi&IvJRNyw)loB z4)}FN(u!ldv@f=3IrSq8ce!=kzA6|s+Jzq>&d7;Bn0BX$KGB<&FrfRFkfc&5fg_m^ zP%@%@UDBYtEi_d(cs+ACpAqm+@>EYpIL z#B!rAg#re8+eUQH4cec&UyG2N1Q6~j_xIu=9#?kIBgN?T$%cso_6=rsWBbY1o9H?@%sFdJq=RiH}m5P^%7N1nKB820R1ex z=U6wms44fL&-0`hx#%hJ(pX=|TJUFF%I&zpf7v_7{rjROoQhsk?`?FB<}J_NI76K# z$~|-}z~z(3?qay9S=T;ozJuNh_w--GiU}vV*bkOkk`9y1D;d`lW(%syZVcOu=J7h~ zLb;U;p{{dw)h<2w=JUwZSiE}fBC7#93~-=ST~5fBRbBd>#AYDcUP^RTCspuoe`V?! 
z6{yQd9u4EfX~>+UOkS#fZG)%er5DMVc>&nysj@VoLP@Gt>$B0wiZ)UMLScAtOKTAE zw4U=o{~bGx4m*-6+WAM&KPWenkSO!Wgy!kAGbiZ=2)ex;NcP7ehpTh3B@`3++5WwF zdQ`m-t==XMH`f+VFs#O))eh+M0*4Yp9MzNEyfVAIZkXc| zce!YYeL$4ccz4YqOv{V)H1u#+9a}TB(ySEqqf|Pg+~n&VmebG}ap%96z+dRoP4 zzFXz;px7j}{0TANt^c9vZ28o?l29nu@sChmVKX~hMg?q-}AKolINY){-(N*Rbj zOi&CdD5=q}W&>WI$}xnGjat5{{V8Kp%T)HAqTA`1iyFNR@3aC=?D3139_wtcpJf19 z{vuL%@WunizTegN*QUUQIo)SE=?S>X&knSIVtWR^OiJ$U!f&->*2=FeWa}kih~a${ zY>XH~CpD~(#Smgq&rOsZpL8~N(ZsaPKPumWN`A^Km_7h@X!-&sJNLL7Q|+u=TaRwM zUN?}UVFg}Pl$C{I3JT};y$BnNnoXzQB({aUE4*ldL1awm@R4Kut$cG8UR2+ufAPcL zemAYvns9(4BRh1UXG?1Z#}9j;{nUrL({?7tTVCfr&;DLtbTI_0K(TB~H^*+!F>>zhjSr%Xg9g*=!tqO86wx|p?5SCJO1x|U+!yW{ z7=<-sXP+D!3wf-Rv4mN-5VuvwPN?pvRX${z<&g9(GjA}kHLnQ|%7fIV%iZTL=OS^p zBMvFwq=#nHuRY!_Z{`V$+}I&@Jsq2pXR^OQMy@|QeRsdDdi*5KvI$vr8Y7oCt(!GP zefd$SF7HEcAMEbi6YBzQN2GVA?5$VqD3@hYfk@PC?DigmmKk;t~AC)SefeQ!l;de(h?2zo`zLh5{g)gd7SdUcRy-eQ*E`i7QpLR(vBW}fA-r=c^ zysme=Pb4HHKpbJzV4pEExYsEbIRCb3d*Hh`K^a|P88V52K#G{b%+kz1^o1>@Y&Rbk zi8FOy3y@Up_qy14Zm7ku{hw_$V>a(7-$xqFzAevlXI!7#8~d8ZJ}lETnKp6e^n5As zFdCADh|HVrc`p|%M$1;%`MnaiY1gT<_Jy`-yK9usrLc*nn!78LE8FR*dY|s;jO@)n zptF=F0pxRPw8ai@y{WX0g44#j5K`1k-x;qdAdNdg4PtI>PLM-$GLLq) zkO6i~_zed6BQtmu^9vx4VZ%iqg@oj#RGb@L018;x90BeeO3QVOqT9dgti|ooPUAhV zv;wJ{A3Cv9fv@T~LXzEh0I`m9+1%hq7|B+9{geuVAhJvSKfwK6%0_nb$~kS^0wy^3 zIW_U}A65Sk8JV;OElSzTl=sptrBBQs79D2l{=ql~<%^lJON}muXc7UAPn-3*2219m zseqRA?=UD-AN+QP<%Qm;U-Xam$Vsu zk+p-Ep>yO(g?7PjiNe@3;|}03we`(gx{BDHIRGuJCU$QF>u_z47*H8ydxJOcloEcv z!ZEXH2e4IB*zK9Qf8Psm45bhW4;tZ6p`p4(BQKd5gymOQtr@!XmbVU*k%x zt}b=_1GgJP7T6!{N*4 z>+cV*>s+$)ocp<-yX(11J8V0GrC8XH6(Zu51582zi};ZD$_|`K>=#XY?`-Q;>gjpH zNqZe{sWdsaxo*OM>AQyiVKoyjnlG>gR1Vg1a%ffv@we&@RaYaD)e*$AMW0kLuym+^ zaxTuDVkP5B4gLttrvrtM!0jv3KZgAL+Ik!?Bn!uw0JXZ}Q5+4_x{=pW&>PPib|S~E ztR+a@V_d0YSGkH0u+osv50pg##g|c{{+sk-3&k)?n{Z3eu#Bby#u8Rn&o?^)gMz9S z7C+5AD&352@C{&vh~vJL;WJd#YAH#_n4nS~(_p4;d4+C;G~_JfLE>gbWRiOv31^4? ztikJ}2N=qsAl75H?>lXQ)kR%+SulkE2aRSi7h`u>uoJU@OMxb)Cu*P;vNXb*^5q*W zK-IapY3Y5HsMRX+4+Ikd=${pY{ zFL3Ury52#0PT1GCxSp7_(&L4Q${zaljw6k!p#I|L&0oZyWrf_Pm8rlY^(O&i-MNH# z2D%+bqX2+u)N@LNYGkw58Gz@M2m_4m;dc7L>4fa1_2Pf)h}m*~bW^}N>4B0EzcH*OIKXXoWp^IHwos6f0&EzX!_%TV}K=Y9Mgr+Bq%JCDJ0^Ryp$0S zM$w6jt@A}lK-UdHQ&k32^bqZQzrYGC{j2Jmb z0sKNyp)ZOq%EEOS9IG7(09{=!NBk!dLjdBLD-&am zG(ZWRWYQiKrWe?ZKTz)4nlX0Dc9YxzC+(v|%s4~DKRubI_5UGzDIqgtj>|T<%bxP$ z)o|R{NXWCr$R@vp+wsSV%7YY7j|}qj;<7RF;oQCMUUbbkNVHCkUiG;#`@QNtY^<)A zGr{QSUTLnfg4_zOp@QS&&|Hn#ed}!EGm$3>3|KT~!5ZcNdO9%GaW%_ryQH`?RUq>2 zp6oLk^D{epzw&)e1&C3yWPtwS#ms8G|8S$LNU=k8U6NE zf1JrRvC~pgSLM6SORO5yU!J91$NgpH{SkeSf3Ke7xszV0k6trbn89E{OOIt>i+zou zGAl_wv_A*#U$meTCOF8c(JCN#*PiR74-H|fvM0yvB%S%f3z`$1R8AHquuWV%^CjT+ zIBwo{HjA%0jY~6gd-V1Z+LHk$rxk#+3^V+%`%7vP>JxAU$PY9+thJl2I|r4QQyaxK zFmEG-k9&(4M=d0RPr>hZfb^k0qt7mPmu}8P?Z(}|J3B}GDt2tscOp$HdxsnRxBCKC z2>dJ!fBlf8^$b0M+4139)g`)M4!{s!~9s*Usyf4M*9-ouRm* zuqEs1o!38pr%`*{t>plAk%0WIplN%q?`38{5gH(41JP;gO|!SjP8Y3|pwhX-cetI_T? z-*oU8Ex42wlUV3-V>b&JcryTySB;7IW~s5EOE(_4Lz7pSXoTu-b-VEsz)U>A0IatA zn(W_JON!wm?GyVG2fww%FUXTh!2k<Wy>B~I9U^+~6T4uT9`7>%bxiyL|B^;-! 
zsw!DGQX5Xx|NCr?+L|D zUh0WfmIFvyY+v1EW>DcYWKfcubHo{%$n@*!Jdc)>=yjCt-jEybH34?0keupwXOa5T z9;9$BVu#fZ5{1!~sKBr1_%sj%fI3Lu~*!re{B_zjA6K0m(ESnkV>6z zE3@zWh4;*vn?SdP^GA|7pHSCl0^Um8P9;ey+9)6I6Zk%~dd11X^ivZ`4!$YV>9^8bfs8)b<2G6=QmSKVb?Z#8 zo3};O1%B$42bGopw=s5+WUg)T=JA6)p@|o``Uiyv1@Ei`hPYc?$`<~zBBmFbz zs7ez>rDCu36ns~$msh$lN$S+QiOIe6SJjDMs9(NYsR?ff z7(6EC*ne(;{c*q|eDAE+-cpRZQi~r<4tvf+r2^c`%BLzE;U_kje&fSZr@ni2sK;iu zl{6Ub+b@_A))ChE$vR>Bs(!?d^;N+TqwEYTyB*6My-_pr<>I}0x2^3IyV!pbh-D>k z?z`5uo%W~VmP$F`-Q_ZiD`t!?eT_HsT&hdz>Z{1fomsNe_}9C_CIk++J9rP@;(j-6 z=f^+AUc5>b`xnnyrdCPs7e7@o2>H~9q=LTzEnq>OfEPJjsl(`0gmEOsOkNBrwKp6> z8gV)lKAnyNE*e;!vl*@$J){1TI(6WGlNh*VFGsY>3mw4-)-2rGYMIABE(aid{t;;A z=;&A-ihM1)cZ6CFmsGnWRAbB7m2QKx{lFU!lLK3)>yljiw|8}C*4WM+~8rx2fe37E;dU6=2SmQM^=>iXU zTW3RT2w7S^`GETFAMWwEVyfYk+L&Cz zt%*HKH4`?0YY#U*i&`9FjLS|tx3bp}hR>a2y!?O)Ehk{3W531;Zq;Dgzz;}0MscHV zf>VAaBu5E7&M8GjESn=h2_KMTHR^xRd^;%&Q1zhftNJN(tQ1OK=0JCo;7_UP$kx{s z^xFBlKl?}<+1yYP59t)!^Cu~q8@6K(Iv=`zJU)~vHzG|a+VjlgbSigI-}=E2rDp*Y z4GuCrV(U9e6}){t_fx4PcVC!W6I8r{{!IN38j6wSP((W#@r|w{cLj)-b@wce2wA}rr{jx{$|1!x=-vu0(HSd4a zm7`SwqmIJ!w@&YJ{z5$+nLR($sWnIJmY8bT;rR>{QRTT(KeT&BVfE98qnV*pGFRZE zINqu!rXKwUcVh$a+lN+15K8Eve*@kI{qUVAXGB9zY)X{lEt^o=TvYS}t|WW-LY}TvGZSzN?^dF^ZI3cVxeCL!Fwsl>9>G7ChgxxZ8Ut&un(|0 zY_M^Vh-7EO#;qDScAn=p)Dxy(pernIaqy<%|Bb*!1x>`9r-hFIxS;jRR_24H{10v& zn=!|jon*?GTH%oKdA-*;==o2q!a11%9~}Eqf`M|8=AR@1=cgDtvH`Pmhv_bC3zvP$ zvoY#Cmc_VjU@Utp2C!tJ@5;Frwbn06Zs6Y6$woa-AT)cGf+ufpZvoNc`}MZGxJo3X zGF6&AGR?Yz$}&KytlPH!!4>>|8KwU~xEbb2PJln=N?3Vlo%yZyrNVj=FeMO*Y+K$f ziQPTJ1{1<(d;*Q1y$cT69k1xAmIrk-%Nu(ZwA9VXU>f}cQI?hzqYnssZ=c$=EFp2{ zKfuAyGWWRISS$=>l5c%wB0a8I$dWw&(xME}pt zoqkS-A`gG6GlJm+x66Cp|B3Pn9&SyHu$NZ+)-M6$J;59%E*O1IVV4x`UuVs~`zkuX zJPgY4E&VrgH<3F<0+Cp`Gy(edw7zNelc2VXw>HM$3gF#zRJ(@q4qWVTF)6G?FvBQ2 zHfYW(5x+P(`6aivW#gKu-TNXb0jq@p_FWYB{y)wV`KO%aoE=;n zYFhXl)M~?sj7l*B%Z){L#HG{LL0RktIb;AIS-= z8cewTfvaxjMdHoF5WY#?Q;895&Yqw=tOEX608Xt%W8nVlE4A$tyuq4s;=Rw~ zPPz>C#)SuH3x>xfqLG$wTpZ7Oa*ceHyLL7VdiD&62K5xeiroD}y=sOW+r-9jskgqi zt|R#F#?a!G6!mb$6^LMr2FubVs~b4yv~EZ3aJ)$X{X+0|ebG`ag)-#v9E9(}%D1{f zjeX<`u}Lk%PU;r-(YDZ!U}L3s6canX%3FvA6+((**1cvDh)^UvPv$@Gn_>h#dtr`*78D3l}C1n5#gG=kGa$l`Tgo#;Jsr8>QBvX9iY# z+RI})JMnz;@ky>+>5ieC{qHTjmT%;{T*w0zMmP%;iaZE6Mdsu<6OjrDRZL@m6V|?b zAh~~@t6t1u^%{r8`v`G~Q?I;rm;EAK>^vIE&C6{+#Wz^pSUWd{j`HFMsC}we+lqb7 zKgOA(S9-c*@OW2Myn@C!;tLQit-suAS?(xzb_mkyGB^`$ zk*Soo)Fq!QQ@VadADw$MOe%x5e~sKWF5VQ(PgL=95-5DbqB$kU%&lE+J8P38Q4>9S z+9D5{J70Zd0q-=L?>VxRr;aKdf;3q4KV5OrXBATK2sq$k^YL2*R^Z8EH@1SnxX7jN z18W&~O7my6s8SBdZ8qY`c4GB+MH89o2t6Jr=03H(wO*8#Btlf0z2`=}A9~-ujl#L@ zM@+o&9#^%9&O*TQdlrv$i!DDJ?GGGXQ;u_XT$MejchF%SZ{8?2T;?;97S7(>AB(kJ z26w!SsoRgj-*@gZ`o;>wj<-}3*`fU%PO}qn zcbKdLcv%!oc}p<>MP_!e z)97m(;rV>uBF8nIJi!F+=T0375KM5lh6dWLcOatgV83^%flqmj;WWN(@4^1Ik?^Ht z_t)M?54@3{W^1k7>#URbT-&;irjYf;D~7J1I6dfR!Jt5wD*5jRtzR>n<`(5w2`x;5 z+yagt;(5zn*M~)}yLXtW`{9`t70$<*)DdrBsvXdRWRic~VrD|PqDpz$7}S$XWxmQO zZQ!wg8Ggki!8;*G>Z!Puu-@(fO-6|m0_$j=spBmg(6!LJ7zasGtu zOS#2)JI`K>{aRrwMpegE&Uejt%35pz;)7D6b9jU%^*`M$aBG&-5;%4=#r(Gc$Mi;s zThqcoZxhX2J@i>{=j#9~I^?B%lLdLA6H>G(A2UMG7#}Zs4~qF-?jbY9-i`KevkM!X zIx@Hzfzc?+6ORN{tYs`M76)R)ad=bv1pk1;4MnLLNNZtS=6RkosDd?7dq=>F@X%_P zGChvy49tZ}n4LoCpJS~(Hs0S%9zCVmEWZBO*|zD6h4u9Oq^z)4gwhf2GGu3t1=N=q z-`OjX{qEU=#R~CREx?VEb!M($({`**6c7F0qHp_TA|-}_{oe?Et=b3oo_M_^GIh(A zGUAIDup`JDFmV^eNJ*W5?)iI)VY3o#$;Lc|{&Y&g+fL4%VagpvjNubQZz>kPYl%{` z4oaYd$|`f7h`Sn!LariFj*6II_q<)@UDR$*PWi zr8jSZgw7Mx5WH^_vAoRjxdq-TCyp-7=@z+$6z7NW>*lxgI;aG2ViI)3T6Dk29Gebe zy&f*zCYclZ9eGL@eMH-FkxTuE5@d;s4^#FnnBX}Zm|$YzXnhJ{&F?+WvE*I2z(BYO 
znp5SQTjZZp&Cf;0=tUL}VKBz}?`z#I!wi>|uOi|PVgtv93)si~mnz}d7CZ+RPho!| zK3^VV;WB=~O{MlRn*;QWs(<&*%-piGE}1S)xl#nbCnIdfNXk|D3F6((#b?NEM6n~r z%1U1RkO#i+Ju?G&kI-h_5)J_-6Cx(Yg&QJ>eTMvLNc}Ws-qFJWHtF8)pl-DQC%Lgi zcBcy8?Kc?~!TRF?D`*_hk1%XQ*|;9hNj_ccz-6nCD7m9298TAiE*5wl=b1v}#F=%^ z=13qLI7xjoxjU-8^B<5_vP&xxGsV}@!7Zj;PVpU%Rcz=WEdU91#A3$oM2N|2ZFVRt z1EH+_fxjew;TO>2OYz3_z=U1LB8Jm(8lPFUCAzT4W!yb$(JpbQnBMKxgmXM9Fi7#Z zVD)vc=Gq02AEuh_b)Ci;Lt|YS5Zz-{7oiQ@Dx9(`BkJ9nrY3)WnA~RE7j=w}7#%M< zQ>F1)%}QA~^v?C0+Dog-hKJNEPU1`FONt{_o2C9Oy%=E7#h7e5Zg&HD-gVip2&py@ z>K%M|u)kfCF}J`sw~GFOu^DVuKCa{N;7Wq44tSoB;xDE*V?3i$g~fMH3Sy_j10ON9 zeDV)JeI)C8F}EQ$hto`Y;^?gxFT}5EBR-U5ELb}bQeKp+J`*<_YB3sK=|RJq6k)2> z#^V_i#5pE82XCi^dR&X`efcxt0#mb|py{u_2Q|S}n4;e*?9?QryMChvk0?i)q`u>6 zz&B}OJ?DOnE>=SU^#usF6IN^q!*|Y^<05d7Un#x-;%Ux1m{GS zMU22q4#;`>6a&$h4%GA&CNX%f!t6#yK*lK4z1IVdv0jjOPcz zE3ZQm!W_lT6)_V_X)R`ET$+pNDROgXV2sr$_)Y6m0tVJbIX-a411Rzfz6!UgIIpX} zhYN#2dnic)M#eNnv{B-|))3Dc+R`Do7M+WLAM*T1kKD^TUvxQO4!J@pFmZwF+D0NS z|4PigxvXyX5FwnIjW0xM{C+UCUuzKa9*DjH+H8^!yy#ot7LN@2|t zDgJW4F8O;N9kGN8ln-ZI&7Q=u)-)J;~Q2pfVpylDTZoj83!wnb@$3x^Y21vVu= zd{?-7$Rv|{#y84m*YumAV#olehDH9u)8g5mo5-A4RxLHya(PHgiN8KOOR7ZIFyGxc zaRM%WogEU5-F+80yXofjo_p4$L2Z>NrShe&URV&}+GLr=s+jsIfq{{u0RcPrMw}KF z@DJ1^`jdwD%rDuK@@`B_`%PTG=ASyg77Z<)iw|kSrzf;rcj;V@58lglgrHa_=46|B zqfdBU`9;iK2@FQ{$m{O8Bdgy(o4C;_omsSXy+fFALoU+fl;Meo-jshf3quNGi;`$| zKIs9s8Wv~v2x9&i8eQQtGLXESAntG1_7#)wWBO^-!*P3}{wSwLQ9l~ujd?XII(Dzj zJQ}#FB51-QLFC07)bdPI9YuS5l@2WrcB{^3X6WsA>;DK$8*+H>HYguwhk9zT8mN2w zq(H%IXiU|#qoAn&V&%Hv#QWu}22^BYRAvxc;DNv_L-A}TalM@}9X?-G-zPYO65sZV zC;093H%wV=tcIMNd-B|@|7o|k+QJQFg3w2ib-g3P63zZ`dxHwR8IKhV&R|s6?7(Zv zF9HfK!H}jOIa!b}0E zS#f0rzenDDal8jvGe%c96iN=SO55LCEtP7E7hg&Bcp5OehV3N=cFfkkuJE(yGq9-R zWT?})#>$e5KTI+iG(^=H^L~xT5+j2UZ=Jt#VeIOLHB!yvg5~@BK-rq@WPxi+fy&M1 zA=7z>J7gVut|5l*7%BLi2>UM4B|0spb-R4%o4hJRV0C?SHzwg9rf#)!GFUdvh0dvp zzW1K0zKG>cY`2w@A&E39CmxqjesBar_y&5jQlbb^`zTs-EVrs~6<^2NW*7enc`+b> z2X8R&b-f~E?hSTq66t8x_QHsk3BWHUT7)l=+uBj1o zpRS{$xSYNiG@t^D#r4AL@Pl+bKBhWDM5tu8Ci{k!fHlQUbKeJKDH zzD6HbEFjQ&NU%*Ry??UIv;*}xdL|T6%)Gp}+L-Jo3YgkIY1U1>i+`l!aX4>*tlfQ| zv8t9Z#sgWp&1~k%n}aj^9)YK3=N>P9@c7OK+8BSCEt$umivmtqCXIuEu=E=eL*7_ zCZTJoKJ5C^)pcwhk&t%`_EJH@hljZ}FalqgXpw0yiK<=A(K>E54324^@g9*kX*^9x zaL)YqFpIz|%tNhw7tt?{5 za8CztCqaVsVb{VwC1eOa!K-7joA=)e<94}h(+XDuNb*RnUN$&HAveeq*<@^X+^*E? z)(%LhN?7R%*D4gU!!=VUc+@%5Jb1oF22<7{I@<6|wvTlC+k&G)qA>%faF~9gvWiT9 zS9ABW0GTjmrmKJciLuqfd5D4c%fVv7HaiZ@JFxo(>(%px!){e9!a3F=y~fsiSC-zI zy?8S9_7f~_&EVDw>sw7p7IB`bGJIQ3lVaD+6}^B`G|p4#by&_kOx`&xFRa3z`}%^RVOl>*~mCzlgugr5-I_sPC?J?$H6 z(Djn9j%r<4b#$-j81|)2l$fa+T=Een%Kzr*{W{@}U3RaZIAO^c7P17sMGXVlX$Qm6 zgqYyS!)!u|d7mCLk`V>hW?|RLJe_)lHVvl+x9mATJlnm#FrFBMN`QmPm)PA3H9`(! 
zbA5rk5hv*+&VKbX8dPaug0ar8vaAlV8^lJrdf9U&5|Gm z4-w*I1Q@fY>Hu@D7PKF4B+Fs#2)9_9g_E$i2Qlx$#CKh`bew!YYGf2&y1g<`&^%7| z639d`ajo+Ua}h4&RWmf6ofvMc+cV6L<^Uk=gEZk?W@is@a_7k-80kL zjQ6jVg5&kcZHKwEpzgyF@HYnx<5HbeBS<=r+}$03qv4y>OhYd8>EzovyQ)uR_izpG(5*m;-$$jS}2b z)*g8=FNMBC{rhPkYj$yGOR8NP#-2_)v5L5pF4QvQ@ItlI9Ak5Ns6Xr*esrNNsII5A z-nWURfW_@vbY_K_{&&=w#e5sLQXx>;?dA)cPO5;CJn94PkfAl#L=fP3RK>+!$m+vU zK$exZ@1((!Myo(0pO|zy#X*;QJ6PNyl}~#W3&I>U*iS_3+X2PQH4b6Z+560tjU1Hm6yO1eN$TNa^wQ5?;IH z{vq;0yv(N4Ql=|-f7Pi3C{57m)Z1Kp2}d)_o%T72A7(`Mnw|}e1n;8fVc=ph9tN(p zHK60-Fpu;1cfYc5|1DRV@GvH^!_gl)UxjJHXSY2L@0QO&h6Z7Ovxa*dP#a!08~kKb zpSR+0P`~kN4m* zdtR7RLcKO+L>csMUD}xWuiW--I7QbQWtG3@pJ=G{Spr4(MOD$(iUR2gz~l8&FW!{> z_?1*W%|^$XEti&<86r6ybvaM;J3?+<=Zu78$&1u4G*0Nyarm_?L!VH;DLztTqfU?< zZ8nOE#{`xq`QJctYwMBjY;^>bmQg3|EiEEBfH_23c=Xn53+&KG3Y}JXZ}nF94K*_k zWNIpzcdWt2wCS&IF_#`JzB^t5hqURKd}O}c;Mwm(AYA*Ip_Ut*M=lf+AVD|p{Qe^q zQu`ebzN@uLxWxJuL#2k1c{DTNTnM3o)7YlV7(n@>{E>Eluuzuu)|MPJ&Uq{EniW(|%HzwHDw zYze9UX9Eknab=VD!Q$+DGzpg>GDfAoj|KW#OoLI<`X_hqdtlzkNi`VU$#7(&5bKED&B-@@YD3uTgwQ)7Pgo z*YdzG05vSE+ZH4@-rIP9j~!yuRSUB)^ZDTVD9QQvankRhla!P!_-K>@IuM0CqbcOo zexB>^H%UN*DP=;=2lxH;N*VWaKouTK{Z`2M5jxz!_S;`)weRou=uFaS*CC>SFqc9e zZGw99dcPKir%;HerLpl#WM2$n<7@xp!SeZ!Xw=cV9va7ci*p2sQZH%2WgZ}oPoF+T zDAw)xMyBfz6mw2n#=a=D&p)8|U=%p_K>trLSFkk;Zk&{{bj<@zd|Z>$Cv*l`=ZI`v4}6N!}dnVVdHgc8M#QQ`raoX53+-W5S$f{yD;ZPLa)KCNbS z5g~z5$=Gc6Q^#niWP_)K_FT`sZJjp@By+X9;*FfYR*t?Lg_HCPrSOo33kZ8QGrnG7 z{;3XcO1Fb27xZO{i_E_EsEu8iod8n%ASI>vId2P}Ghm~AhD;lOqj|ukf%#ejGX{9^ zkm}OFu+`Zs`s?P*n#7byBBMX7Fkrv6T~|MX^r{wP+=&^Hy}0`GU2qFU7w7}Gd_Q{h z$k*1Ajz-2)F$T6gO%J3Fj6;zmAW8rFN_ZVmiXb2xUsQm+ObU^Ky-j>$k^VUffpjhm zi2mnJbyuJ*XoV2Rp7jm8WP0*fKUz{gT|xR_gkHPREO=9{X`_bny*9UDzWN!JdN*~( zM$Ibqk5C@nF6XkLZ-l2SJqKVbUm>%5E|mS=vKAm$qpVyHZX6!X!{Etx{JQ0}h0B+2 z17phLnTJtOA0uT=E)b;!V*1EC{k z?q*(rm_W-t2kJQ6t_(X>vqmLDt|RoWf37*?R1*!5$I1&)vNh{Hx7y}6US$f$VK1IY zcyfM@!uX80T#2*B=X}Fj->Zt0loj~O#?RjEufDfxZhJSv>jtUsrlb~{-xFLs8{;ep zRH96yZjc29jISf+o<3h<=@#`w}FQi`c_JLM22Js9UNRUXBZD*s9uWHKYZ6RNS=5_$Mp$jfnPw_-}6)LqD5!;7oR}3jv_Jg zt=LvrH%REb>DOyBf({}AS8bfIJ>;vdKtA2=`fzEP`Kn$RD%?f4(DYoFUU?1V={$5e zpkJ>(4G0d#c#8<_22xNCQnY ze%&**^2?$A>s@0OG5HWEvdI<=r0#Nekb|W(IqOE8rq16LF&WVDTQp;rz?9E-EFoSa zfZ$qetWH<>kZfAr%zpcON;Tz+1r_;*x##?Ad+N?FVN*5No(Kl--yPx~6YVQZ&PBGX zXt%1r@@sj?YFm4VG~m`-XtUPy@`|U}XW4}``!Em(PmNq8w-(=gtzbx_`H{MLk+U#o zg+%44;$Z?|WeW0i+PbkwnK;|4AORl=b{8?ZrxGYWH(M}O^#tF@`xU`I8Mtz)<`9cQ zqP`b+b#7X}WqIsZ{^oQmouq*}Mkfq?hLB#rDSty{{#HY{NS$e6xW+a00D4kdV!5@a z-;%~;(EXCN|RpIY^8KJz>XFFG;;(K+&jeyPCD zLZeA#2cx17&#K(T|k z5C>WvpW%1Gd=v}b))P6$3=|@3kNn#1#Y>jOR?3|lltBB?8crAbq&ciH_lzQsK}U)B~!ZP`b1J ztOtU$pgPsWL$fyO^dd5;YEDFIo7=@hH*NVLV`GGBJqT?k`Ilt`*u$<2RZk}~XY`8f z#UmZZJo#Yx20)cpJv7*U`j<27GbMuKf|YW_!;6%>_mHR zlxS}k$ptd=&mFgyR$aFK6|K>GI{kkcbOh5R*Y$G1Dub~cqpeTbklMM~y8k6x@T#RYit=jd(SlqPF@uP~Tb#!FZXY&j=K#j5wbtUKa;J=+fQXp_aKj zWD(tOVzma8dzoW-!kTi=t@XLdbhEU4WnsW(Bo;f#w3+coHu6ad6^80tO=JajN8mYD z#D-O#2TwQXJ0tSVNXk)=Q@wZ4wG&RZFSC{>vR?3=n3JRUBl4@DYtRd&!{x@W^dfI2 zV)rdtCE!&iil8X(CIVi}p>Cx+#jYCW`KhZnV(1{TLL3yEolUMFpeFfhJap%D@IuG! 
zplychlXo+-M5jjYW{%b(Al;1)h3Sx$uc(CZB#SpegOAq>*WGk3o{W3bQd?r{ZUD(s z*YUoI6ZbT^NQF`R;LnoC^zyXz%XC+xHprFsVUgiHkAk6zdVpllqTOTld&|q(AX!t7 ziD~_AOs&?W2%gjQgGKJ`ks0a5&X|g%I$0Mze?y>}YOh;Z4PbeAc(Sj|#HN(j_~2mg zeM}dCbc;e7;-)0(6If>3%O0abUnOR&PwlV=KH#7ardUis>v~l7Ku8#uA1RQ34Pm7; zkAEsj?sz0RK2nB%rQcFA9*HZ*tMOYk8Ggp(<7=+eaFHO`bj4$OS~tHWZUhGQQrfsl z=Vn)Fd+{o)#!JJeUHks<%bW;!Ro;=tBloKW7%w`F`)Q>Hn_O6R1 z^OdZmnB^~7-BXoGWmAn-ipX})5p2eoT;O66{OJFdpYT*=d|J=}Bw%C{zHiuEB}!$x zMDI||Vve2_kD3HB(|q}um{-a?IuTvlV@{sRAX_fSd=8mTbZ4#-Oe~hp7^l{mU(}>{ z+P1c-3yFjXo+~s{HfV6fPtOxdtmg8U#1Be+C_SP3PAyDYSTH z3f1nZd^^u0Yr*ruzZ>~@!J-p00Tc+anNHdDPfeggR=nnjdZZ%9G5;>Ls^bjvYNj=; z{U;YoCZ8jun3i~5)~mVJzQ)(>!FztX*$~4%%`pE&B2Y3r0J~U6GDVO)mA`TccaD_G zUFH^UYoX{gG&FCzzP!ff=?v!>F+jx=x$N$0t$;tF#lioM_#N3R0F#7p9_#e8B!_Db zJXYfm-uYx=Y}~y7^4gBa(`wbuUWsaJ0D+Nu#WuzWekJ$G*6t1m7;Lt$;6_F8#L5Mg8#XtYt5FV`q^ z)@tDiVb1tW=F!a*^L*ru)VuBnD_z3-d(>w`Lmv3#o*xv&I%c5+bx+nx_m)iM?oon? z`R3lvM+90wEo86$@@7!Y!`kbfnn0*r$aFr#p5EeP1?-sWrQ9>h_5My>Fp)Kczm5qP zzVLt!=PkKY+!?I7kI!blgH553MQDOQi^Onkrg?ctX|4hYW&`%e>>Z;g zk2i-)J9MpCq#>XLP!_W{kb8zDdfqAGgB-T?pvnk9A52OEmHp;r*2DZ@%eO!qtoGrV z9708WGB!u?Unzqfc>(`~mbXN)%Q)&OoT@KL97aS=tGwZB6$RFmkZ*WCAGCWCH0mv#uVoU>=AKe9suxSgo6Xu5k8 zwL)zKX?+G36>YkXKotV4;p*%dUMT^Eri5w+tCjkql;~c#~MWp%3A2>mvYVZ{GDUanGyvi~6%n}mgcoeIbCx!P{qvOGpQprz zv=zSWrd@&d+tiIr-k@AjULQCR2UNG<&V>Cnj#24K>Akj?GhU(zP15SN-|$45S$caH zFYv4YVT^0nA|c6i|2O`CRg^_2mHeNq4{e@Zz6Iiex{kEw6Nf4x7nRd%(AbcQ?};k=9Vjsi=0%vCy~FzP$elGeIK>r$tDad5cjufAqKG+b(u7 zqtiB>86c4V83Lf)R(G1E@}p^rWIL=;P|PHmC7Qe6O`X7UugLmd;o z79K>t;M$cgt$eu1k_J%j)&{z!%;cQ%G^qIY0Ge<*{KJuAC@G?jycmMjDvX~G{qX3t zFC9HW57!Y|R41ZU_1AxcKfQnM}#7P_^h!CU!TcF#PPCA`FJ3 zAx1DO!}&awbq@!hGMNY^x8bzGV~NGIpZ&qwB)*ICFmvBB;y9^kiGjJ4Lw-NRIlzvqJ#4f@RgPZ)l} z57z%NE8;DjfhGyH)h>a14^=6`j?_O0L=#OlF~MbNx_^=X0FhEeFMiS1u-#}bIZbNE z#{2}TbpBKJ^!@0z)je_Zui)TI?|n{h|8^>^=pd>`xb{FbvqHck?F zxS0YT8ySx4okO|`W^ueN{9lNsq6nO`2bgX2ErI243QHug@^Ww>pbAkU_d7P#Y;E<4 zNr4}7KwDdO|1}X>`2NHf<>jPqN?sSRn;g81%K!3Xpo~<{$qV@Hf@#{>za8IM_|LOj zNeX;UD_piZJYaH2YFb)9f+Bol?45e9{gnwS1cAC~beSE`ma&iDM+x6{h|>BgD!P5r z`tx752TnRkkqSVI?Xq!6xYhfO=GU!|Ou;44cpe=S^EMttF;X&AhW`S~Habz3I|7h~ zlhU*`#8hJV4KwgRM-ESZ)&eG(i5x@j{a;_Fz6i$dzS@k4LeC*1Bhm9reddQ z#M?esK!f(d`5*>2mj3dIBm_O6)cB`}Qxd0-{O#NyfQjKaph%$#@@10}qWQ+npvrG; zxm{Apzoxnm9fF639_$}Pyc*t1yZpZBN_$nHcD}vwk^T0aHpM_ActmASt35Uc5P*7J12X@cXwLGiKcY9Zbeo9#zFo zU$@)d)3JX#hokZm`x*E;^AmbfwS!LaR8H9~DGIf;>e6*|bj<3vr}YmSnP+2kjjd>n zX)tJSymWcjEdmtzp+@a4*o@orfDic{A4HMsaSsnsn`>RXzxQDw1Jlj?%5KmCyVKm+ zpBHXh4(;dzsNLJ@A6lB6+y>XU4)Lzu#sAH3$ebbcANVc0d4wC+QF&Q;r<)fFc772O z{Vpy;3aKN%TYuW#UH1GR_}sx&!b4Q(b!YWwb0sH!bV9?ChuAndv&%nM#E@mK0KMWC z>Fk@bpruzWXIrQ0QV+8OMIW%paudG{?bnAW7u&_MdShDCM^$;bUL-juX1fNFM_vOl z0y$;GIufCj_zdMy?5Zfl%-wnvE^6n`hbO43L^#S@TiYpo+53YY*&G43cLSytK<_-G zx;Ye1Q91l1B9+}JKCav>PAgbY@LLW<;p!(Iu;oAW!=MgB{xp8!M~`+{z8{pW5{ImY zPj#&zfQc2oQIz(1^$r-I`Um;n2`Im^x}xyX7HA8e+oSX2fWS#83MTbil5_U&-@hdL zx^0u?(L&FY>v8Q&Y}L0bKWB}JT&S!cNSS=Qp-WnwDJL8ej!Q}7$}!G;*#M=jk2I>7z+IWgS)Dq9;Sd&RHko7cXckgq5Q$$&`ux8)4D!8*D=XA z73CU8TOzat;{Sm|58i}@p|uE5AH!$AdvLa(UH~3oWxy9T%D!&mBT#vHXPOu-ur#`c zmSzBhq}{F^MOyARld=7}ncx2VI#4qPIAi*c$8-qj(xO*e<-Tcl0bkw?kPT%fo!i8F z+8NmYT^N|h3Pi$M(lXQ>`5^}Kf78B}7k3C@&jXSM|f6Df_Px4jckp8ixGA zvg@p<)eGdhqkrYuo0lz51r7k7vcBT%`+CW3u^b@{f1&2P;O;%;06J$eb1G)pfajSV z!NRuHhG~iX=4Ea(P0*lCL~nz3PIA#8DKmz)5Pqv;Io(4S!CC^8sPxZ4-yu%F-f9LP z?OB+>>ualj{`^QkJFQ8v#p-O9a48siosZPhV+`cew);Epqw#sS+zmlLiKI!et_{{~ zU%}kBK2N*74TAgq(3)I1I%+evRx&#mSLI3D54F{c`wA=54Kn&rVdxm#ebV!z%3Ff) zo5bS}O@596#5m{wzBc?93q9up1`7Up$tDfnetrm;trpBTg(0F>D3LnV1EqhJ-%hL= 
diff --git a/server/scripts/lint.sh b/server/scripts/lint.sh
new file mode 100755
index 000000000..5721aa419
--- /dev/null
+++ b/server/scripts/lint.sh
@@ -0,0 +1,12 @@
+#!/bin/sh
+
+# Run the configured linters.
+#
+# These checks will run when a PR is opened and new commits are pushed, but
+# they can also be run locally.
+
+set -o errexit
+set -o xtrace
+
+go vet ./...
+go run honnef.co/go/tools/cmd/staticcheck@latest ./...
diff --git a/server/scripts/museum.service b/server/scripts/museum.service
new file mode 100644
index 000000000..6f66133fe
--- /dev/null
+++ b/server/scripts/museum.service
@@ -0,0 +1,23 @@
+[Unit]
+Documentation=https://github.com/ente-io/museum
+Requires=docker.service
+After=docker.service
+# Don't automatically restart if it fails more than 5 times in 10 minutes.
+StartLimitIntervalSec=600
+StartLimitBurst=5
+
+[Service]
+Restart=on-failure
+ExecStartPre=docker pull rg.fr-par.scw.cloud/ente/museum-prod
+ExecStartPre=-docker stop museum
+ExecStartPre=-docker rm museum
+ExecStart=docker run --name museum \
+    -e ENVIRONMENT=production \
+    --hostname "%H" \
+    -p 443:443 \
+    -p 2112:2112 \
+    -v /root/museum/credentials:/credentials:ro \
+    -v /root/museum/credentials.yaml:/credentials.yaml:ro \
+    -v /root/museum/data:/data:ro \
+    -v /root/var:/var \
+    rg.fr-par.scw.cloud/ente/museum-prod
diff --git a/server/tools/abort-unfinished-multipart-uploads/README.md b/server/tools/abort-unfinished-multipart-uploads/README.md
new file mode 100644
index 000000000..3211bfadc
--- /dev/null
+++ b/server/tools/abort-unfinished-multipart-uploads/README.md
@@ -0,0 +1,19 @@
+Find all pending multipart uploads, and abort any of them that are older than
+a cutoff (currently 20 days).
+
+This shouldn't be needed in practice since we already track and clear
+temp_objects. However, in rare cases it might happen that museum gets restarted
+in the middle of a multipart replication. This tool can be used to list and
+clean up such stale replication attempts.
+
+## Running
+
+    go run tools/abort-unfinished-multipart-uploads/main.go \
+        --profile my-profile --endpoint-url https://s3.example.org --bucket my-bucket
+
+For more details, see `ParseAndCreateSession`.
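+
+As a sketch (the profile name, region, and keys below are placeholders), the
+AWS shared config that `--profile` reads might look like:
+
+    # ~/.aws/config
+    [profile my-profile]
+    region = eu-central-2
+
+    # ~/.aws/credentials
+    [my-profile]
+    aws_access_key_id = ...
+    aws_secret_access_key = ...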
+
+To preview the uploads that would be aborted, do a dry run (the default) and
+filter its output:
+
+    go run tools/abort-unfinished-multipart-uploads/main.go \
+        --profile p --endpoint-url e --bucket b | grep 'Dry run:'
diff --git a/server/tools/abort-unfinished-multipart-uploads/main.go b/server/tools/abort-unfinished-multipart-uploads/main.go
new file mode 100644
index 000000000..172b6b5b5
--- /dev/null
+++ b/server/tools/abort-unfinished-multipart-uploads/main.go
@@ -0,0 +1,89 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/ente-io/museum/tools/pkg/cli"
+)
+
+func main() {
+	bucket := ""
+	confirm := false
+
+	flag.StringVar(&bucket, "bucket", "",
+		"Bucket to delete from")
+
+	flag.BoolVar(&confirm, "confirm", false,
+		"By default, the tool does a dry run. Set this to true to do the actual abort")
+
+	sess, err := cli.ParseAndCreateSession("", false)
+	if err != nil {
+		return
+	}
+
+	if bucket == "" {
+		fmt.Printf("Error: no bucket specified (hint: use `--bucket`)\n")
+		return
+	}
+
+	s3Client := s3.New(sess)
+
+	// - List all multipart uploads
+	// - Abort the ones that are older than the cutoff (20 days), but only if
+	//   `--confirm` is specified
+
+	listOut, err := s3Client.ListMultipartUploads(&s3.ListMultipartUploadsInput{
+		Bucket: aws.String(bucket),
+	})
+	if err != nil {
+		fmt.Printf("ListMultipartUploads %s error: %s\n", bucket, err)
+		return
+	}
+
+	fmt.Printf("ListMultipartUploads: %v\n", listOut)
+
+	if listOut.IsTruncated != nil && *listOut.IsTruncated {
+		fmt.Printf("Warning: Found more than 1000 pending multipart uploads. We were not expecting this many.\n")
+	}
+
+	// 20 days ago
+	cutoff := time.Now().AddDate(0, 0, -20)
+	fmt.Printf("Cutoff: %v\n", cutoff)
+
+	for _, upload := range listOut.Uploads {
+		fmt.Printf("Processing multipart upload key %v id %v initiated %v\n",
+			*upload.Key, *upload.UploadId, *upload.Initiated)
+		if upload.Initiated.After(cutoff) {
+			fmt.Printf("Skipping multipart upload since it was initiated (%v) after cutoff (%v)\n",
+				*upload.Initiated, cutoff)
+			continue
+		}
+
+		if confirm {
+			abortMultipartUpload(s3Client, bucket, *upload.Key, *upload.UploadId)
+		} else {
+			fmt.Printf("Dry run: AbortMultipartUpload: %v/%v/%v\n", bucket,
+				*upload.Key,
+				*upload.UploadId)
+		}
+	}
+}
+
+func abortMultipartUpload(s3Client *s3.S3, bucket string, key string, uploadId string) error {
+	_, err := s3Client.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+		Bucket:   &bucket,
+		Key:      &key,
+		UploadId: &uploadId,
+	})
+	if err != nil {
+		fmt.Printf("AbortMultipartUpload failed: key %v id %v: %v\n", key, uploadId, err)
+		return err
+	}
+
+	fmt.Printf("AbortMultipartUpload success key %v id %v\n",
+		key, uploadId)
+	return nil
+}
diff --git a/server/tools/gen-random-keys/README.md b/server/tools/gen-random-keys/README.md
new file mode 100644
index 000000000..0cfcbf051
--- /dev/null
+++ b/server/tools/gen-random-keys/README.md
@@ -0,0 +1,9 @@
+Generate random keys that can be used in the museum configuration file.
+
+## Details
+
+This tool can be used to generate new random values for various cryptographic
+secrets that should be overridden in `configurations/local.yaml` when running a
+new instance of museum.
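+
+It prints one line per config key. The shape of the output is illustrated
+below (the values are placeholders, not real secrets):
+
+    key.encryption: <base64 key>
+    key.hash: <base64 hash>
+    jwt.secret: <base64url secret>
+
+## Running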
+
+    go run tools/gen-random-keys/main.go
diff --git a/server/tools/gen-random-keys/main.go b/server/tools/gen-random-keys/main.go
new file mode 100644
index 000000000..167978974
--- /dev/null
+++ b/server/tools/gen-random-keys/main.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"encoding/base64"
+	"fmt"
+	"log"
+
+	"github.com/ente-io/museum/pkg/utils/auth"
+
+	generichash "github.com/GoKillers/libsodium-go/cryptogenerichash"
+	secretbox "github.com/GoKillers/libsodium-go/cryptosecretbox"
+	"github.com/GoKillers/libsodium-go/sodium"
+)
+
+func main() {
+	sodium.Init()
+
+	keyBytes, err := auth.GenerateRandomBytes(secretbox.CryptoSecretBoxKeyBytes())
+	if err != nil {
+		log.Fatal(err)
+	}
+	key := base64.StdEncoding.EncodeToString(keyBytes)
+
+	hashBytes, err := auth.GenerateRandomBytes(generichash.CryptoGenericHashBytesMax())
+	if err != nil {
+		log.Fatal(err)
+	}
+	hash := base64.StdEncoding.EncodeToString(hashBytes)
+
+	jwtBytes, err := auth.GenerateRandomBytes(secretbox.CryptoSecretBoxKeyBytes())
+	if err != nil {
+		log.Fatal(err)
+	}
+	jwt := base64.URLEncoding.EncodeToString(jwtBytes)
+
+	fmt.Printf("key.encryption: %s\n", key)
+	fmt.Printf("key.hash: %s\n", hash)
+	fmt.Printf("jwt.secret: %s\n", jwt)
+}
diff --git a/server/tools/pkg/cli/aws.go b/server/tools/pkg/cli/aws.go
new file mode 100644
index 000000000..599e3543a
--- /dev/null
+++ b/server/tools/pkg/cli/aws.go
@@ -0,0 +1,92 @@
+package cli
+
+import (
+	"flag"
+	"fmt"
+	"os"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/session"
+)
+
+// ParseAndCreateSession returns a Session object, emulating AWS CLI
+// configuration.
+//
+// This is a convenience method for creating CLI tools that behave similarly to
+// the AWS CLI in where they pick up their configuration and credentials from.
+//
+// It'll add and parse two command line flags: `--profile` and `--endpoint-url`.
+//
+// Beyond that, the method will pick up the S3 configuration and credentials
+// from the same standard places where aws-cli looks for them:
+//
+// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html
+//
+// As a tldr, the easiest way to use this might be to add a new AWS profile:
+//
+//	# ~/.aws/config
+//	[profile wasabi-test-compliance]
+//	region = eu-central-2
+//
+//	# ~/.aws/credentials
+//	[wasabi-test-compliance]
+//	aws_access_key_id = test
+//	aws_secret_access_key = test
+//
+// And `export AWS_PROFILE=wasabi-test-compliance`, or provide it to the
+// commands via the `--profile` flag.
+//
+// Alternatively, if you don't wish to use AWS profiles, then you can provide
+// these values using the standard AWS environment variables.
+//
+//	export AWS_REGION=eu-central-2
+//	export AWS_ACCESS_KEY_ID=test
+//	export AWS_SECRET_ACCESS_KEY=test
+//
+// > Tip: If your shell is configured to do so, you can prefix these commands
+// with a leading space (e.g. ` export AWS_SECRET_...`) to avoid preserving
+// these secrets in your shell history.
+//
+// The endpoint to connect to can be either passed as an (optional) method
+// parameter, or can be specified at runtime using the `--endpoint-url` flag.
+//
+// S3ForcePathStyle can be set to true when connecting to locally running MinIO
+// instances where each bucket will not have a DNS entry.
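+//
+// As an illustrative sketch (the profile and endpoint values are examples,
+// not part of this API), a tool would typically call this first and then hand
+// the session to an S3 client:
+//
+//	sess, err := cli.ParseAndCreateSession("", false)
+//	if err != nil {
+//		return
+//	}
+//	s3Client := s3.New(sess)
+//	// invoked as: go run the-tool --profile my-profile --endpoint-url https://s3.example.org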
+func ParseAndCreateSession(endpointURL string, S3ForcePathStyle bool) (*session.Session, error) {
+	logLevel := aws.LogDebugWithHTTPBody
+	cliProfile := flag.String("profile", "",
+		"The profile to use from the S3 config file")
+	cliEndpointURL := flag.String("endpoint-url", "",
+		"The root URL of the S3 compatible API (excluding the bucket)")
+	flag.Parse()
+
+	profile := *cliProfile
+	if profile == "" {
+		profile = os.Getenv("AWS_PROFILE")
+	}
+
+	// The CLI flag, if specified, always overrides the passed in value.
+	if *cliEndpointURL != "" {
+		endpointURL = *cliEndpointURL
+	}
+
+	fmt.Printf("Using profile %s, endpoint %s\n", profile, endpointURL)
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		Profile: profile,
+		// Needed to read the region from ~/.aws/config
+		SharedConfigState: session.SharedConfigEnable,
+		Config: aws.Config{
+			Endpoint:         aws.String(endpointURL),
+			S3ForcePathStyle: aws.Bool(S3ForcePathStyle),
+			LogLevel:         &logLevel,
+		},
+	})
+	if err != nil {
+		fmt.Printf("NewSessionWithOptions error: %s\n", err)
+		return sess, err
+	}
+
+	return sess, err
+}
diff --git a/server/tools/test-wasabi-compliance/README.md b/server/tools/test-wasabi-compliance/README.md
new file mode 100644
index 000000000..97862a2f9
--- /dev/null
+++ b/server/tools/test-wasabi-compliance/README.md
@@ -0,0 +1,30 @@
+Run through the process of using [Wasabi's compliance
+feature](https://wasabi.com/wp-content/themes/wasabi/docs/API_Guide/index.html#t=topics%2FCompliance.htm&rhsyns=%20)
+to ensure that it does indeed behave the way we expect it to.
+
+Also acts as a test for the code in `pkg/external/wasabi`.
+
+## What does it do?
+
+The command runs through two checklists:
+
+* The first checklist enables compliance on the bucket, adds a new object, and
+  then disables the conditional hold on that object (see `Sequence 1` for the
+  full sequence that'll be run through).
+
+* The second checklist deletes the object. This checklist can be executed by
+  running the command with the `--only-delete` flag (see `Sequence 2` for the
+  full sequence).
+
+Since the minimum retention duration is 1 day, the second checklist has to be
+run manually at least a day after the first.
+
+## Running
+
+Use the `--profile` flag (or set the `AWS_PROFILE` environment variable) to
+specify which AWS config and credentials to use:
+
+    go run tools/test-wasabi-compliance/main.go --profile my-test-profile
+
+For more details about how profiles work, or alternative ways to provide
+credentials, see the documentation for `ParseAndCreateSession`.
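+
+For example, a full manual run might look like this (the profile name is a
+placeholder):
+
+    # Day 0: sequence 1 (enable compliance, upload, release conditional hold)
+    go run tools/test-wasabi-compliance/main.go --profile my-test-profile
+
+    # Day 1 or later: sequence 2 (the delete should now succeed)
+    go run tools/test-wasabi-compliance/main.go --profile my-test-profile --only-delete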
diff --git a/server/tools/test-wasabi-compliance/main.go b/server/tools/test-wasabi-compliance/main.go
new file mode 100644
index 000000000..7af2f4841
--- /dev/null
+++ b/server/tools/test-wasabi-compliance/main.go
@@ -0,0 +1,305 @@
+package main
+
+import (
+	"flag"
+	"fmt"
+	"strings"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/s3"
+	"github.com/aws/aws-sdk-go/service/s3/s3manager"
+	"github.com/ente-io/museum/pkg/external/wasabi"
+	"github.com/ente-io/museum/tools/pkg/cli"
+)
+
+const (
+	bucket      = "ente-compliance-test"
+	objectKey   = "compliance-test-object"
+	objectValue = "test-object-contents"
+)
+
+func main() {
+	// Can be overridden on the command line with `--endpoint-url`
+	endpointURL := "https://s3.eu-central-2.wasabisys.com"
+
+	onlyDelete := false
+	flag.BoolVar(&onlyDelete, "only-delete", false,
+		"If true, then we will only delete the object from the test bucket (sequence 2)")
+
+	sess, err := cli.ParseAndCreateSession(endpointURL, false)
+	if err != nil {
+		return
+	}
+
+	s3Client := s3.New(sess)
+
+	if !onlyDelete {
+		err = sequence1(s3Client)
+	} else {
+		err = sequence2(s3Client)
+	}
+	if err != nil {
+		return
+	}
+
+	fmt.Println("Checklist completed successfully")
+}
+
+// # Sequence 1
+//
+// - Get and print the current compliance settings for the test Wasabi bucket.
+// - Update the compliance settings to set RetentionDays = 1.
+// - Get and verify the updated settings.
+// - Put an object into the bucket.
+// - Ensure it cannot be deleted or overwritten.
+// - Get and print the compliance settings for the test object in this bucket.
+// - Disable the conditional hold for the object.
+// - Ensure it still cannot be deleted (we'll need to wait for a day).
+// - Print and verify the updated compliance settings.
+func sequence1(s3Client *s3.S3) error {
+	_, err := getAndPrintBucketCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = enableBucketCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = verifyBucketComplianceEnabled(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = putObject(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = deleteObjectExpectingFailure(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = putObjectExpectingFailure(s3Client)
+	if err != nil {
+		return err
+	}
+
+	_, err = getAndPrintObjectCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = disableObjectConditionalHold(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = deleteObjectExpectingFailure(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = verifyExpectedObjectCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// # Sequence 2
+//
+// - Get and print the object's info.
+// - Delete the object. This time it should succeed.
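+//
+// Note: since RetentionDays is set to 1, this sequence will only succeed when
+// run at least a day after sequence 1 uploaded the object (see the README).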
+func sequence2(s3Client *s3.S3) error {
+	_, err := getAndPrintObjectCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	err = deleteObject(s3Client)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Operations
+
+func getAndPrintBucketCompliance(s3Client *s3.S3) (*wasabi.GetBucketComplianceOutput, error) {
+	out, err := wasabi.GetBucketCompliance(s3Client, &wasabi.GetBucketComplianceInput{
+		Bucket: aws.String(bucket),
+	})
+	if err != nil {
+		fmt.Printf("GetBucketCompliance %s error: %s\n", bucket, err)
+		return nil, err
+	}
+
+	fmt.Printf("GetBucketComplianceOutput: %v\n", out)
+	return out, nil
+}
+
+func enableBucketCompliance(s3Client *s3.S3) error {
+	out, err := wasabi.PutBucketCompliance(s3Client, &wasabi.PutBucketComplianceInput{
+		Bucket: aws.String(bucket),
+		BucketComplianceConfiguration: &wasabi.BucketComplianceConfiguration{
+			Status:          aws.String(wasabi.BucketComplianceStatusEnabled),
+			RetentionDays:   aws.Int64(1),
+			ConditionalHold: aws.Bool(true),
+		},
+	})
+	if err != nil {
+		fmt.Printf("PutBucketCompliance %s error: %s\n", bucket, err)
+		return err
+	}
+
+	fmt.Printf("PutBucketComplianceOutput: %v\n", out)
+	return nil
+}
+
+func verifyBucketComplianceEnabled(s3Client *s3.S3) error {
+	out, err := getAndPrintBucketCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	if *out.Status != wasabi.BucketComplianceStatusEnabled {
+		err := fmt.Errorf("expected Status to be %q, got %q",
+			string(wasabi.BucketComplianceStatusEnabled), *out.Status)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	if *out.RetentionDays != 1 {
+		err = fmt.Errorf("expected RetentionDays to be %d, got %d", 1, *out.RetentionDays)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	if !*out.ConditionalHold {
+		err = fmt.Errorf("expected ConditionalHold to be %t, got %t",
+			true, *out.ConditionalHold)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	return nil
+}
+
+func putObject(s3Client *s3.S3) error {
+	uploader := s3manager.NewUploaderWithClient(s3Client)
+
+	out, err := uploader.Upload(&s3manager.UploadInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+		Body:   aws.ReadSeekCloser(strings.NewReader(objectValue)),
+	})
+	if err != nil {
+		fmt.Printf("Upload %s/%s error: %s\n", bucket, objectKey, err)
+		return err
+	}
+
+	fmt.Printf("UploadOutput: %v\n", out)
+	return nil
+}
+
+func putObjectExpectingFailure(s3Client *s3.S3) error {
+	uploader := s3manager.NewUploaderWithClient(s3Client)
+
+	out, err := uploader.Upload(&s3manager.UploadInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+		Body:   aws.ReadSeekCloser(strings.NewReader(objectValue)),
+	})
+	if err == nil {
+		err = fmt.Errorf("expected Upload %s/%s to fail because of compliance being enabled, but it succeeded with output: %v",
+			bucket, objectKey, out)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	fmt.Printf("UploadError (expected): %v\n", err)
+	return nil
+}
+
+func deleteObject(s3Client *s3.S3) error {
+	out, err := s3Client.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+	})
+	if err != nil {
+		fmt.Printf("DeleteObject %s/%s error: %s\n", bucket, objectKey, err)
+		return err
+	}
+
+	fmt.Printf("DeleteObjectOutput: %v\n", out)
+	return nil
+}
+
+func deleteObjectExpectingFailure(s3Client *s3.S3) error {
+	out, err := s3Client.DeleteObject(&s3.DeleteObjectInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+	})
+	if err == nil {
+		err = fmt.Errorf("expected DeleteObject %s/%s to fail because of compliance being enabled, but it succeeded with output: %s",
+			bucket, objectKey, out)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	fmt.Printf("DeleteObjectError (expected): %v\n", err)
+	return nil
+}
+
+func getAndPrintObjectCompliance(s3Client *s3.S3) (*wasabi.GetObjectComplianceOutput, error) {
+	out, err := wasabi.GetObjectCompliance(s3Client, &wasabi.GetObjectComplianceInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+	})
+	if err != nil {
+		fmt.Printf("GetObjectCompliance %s error: %s\n", bucket, err)
+		return nil, err
+	}
+
+	fmt.Printf("GetObjectComplianceOutput: %v\n", out)
+	return out, nil
+}
+
+func disableObjectConditionalHold(s3Client *s3.S3) error {
+	out, err := wasabi.PutObjectCompliance(s3Client, &wasabi.PutObjectComplianceInput{
+		Bucket: aws.String(bucket),
+		Key:    aws.String(objectKey),
+		ObjectComplianceConfiguration: &wasabi.ObjectComplianceConfiguration{
+			ConditionalHold: aws.Bool(false),
+		},
+	})
+	if err != nil {
+		fmt.Printf("PutObjectCompliance %s error: %s\n", bucket, err)
+		return err
+	}
+
+	fmt.Printf("PutObjectComplianceOutput: %v\n", out)
+	return nil
+}
+
+func verifyExpectedObjectCompliance(s3Client *s3.S3) error {
+	out, err := getAndPrintObjectCompliance(s3Client)
+	if err != nil {
+		return err
+	}
+
+	if *out.ConditionalHold {
+		err = fmt.Errorf("expected ConditionalHold to be %t, got %t",
+			false, *out.ConditionalHold)
+		fmt.Printf("Error: %s\n", err)
+		return err
+	}
+
+	return nil
+}