Merge remote-tracking branch 'origin/main' into beta

This commit is contained in:
Prateek Sunal 2024-03-15 19:58:35 +05:30
commit 2a84f7810d
68 changed files with 1146 additions and 85 deletions

View file

@ -0,0 +1,24 @@
# Builds the copycat-db Docker image and pushes it to our Scaleway registry.
name: "Release (copycat-db)"

on:
    workflow_dispatch: # Run manually

jobs:
    build:
        runs-on: ubuntu-latest
        steps:
            - uses: actions/checkout@v4
              name: Check out code

            - uses: mr-smithers-excellent/docker-build-push@v6
              name: Build & Push
              with:
                  dockerfile: infra/copycat-db/Dockerfile
                  directory: infra/copycat-db
                  image: ente/copycat-db
                  registry: rg.fr-par.scw.cloud
                  enableBuildKit: true
                  # Bake the commit hash into the image for traceability.
                  buildArgs: GIT_COMMIT=${GITHUB_SHA}
                  # Tag with both the commit SHA and a movable "latest".
                  tags: ${GITHUB_SHA}, latest
                  username: ${{ secrets.DOCKER_USERNAME }}
                  password: ${{ secrets.DOCKER_PASSWORD }}

View file

@ -407,7 +407,6 @@
"hearUsWhereTitle": "Wie hast du von Ente erfahren? (optional)",
"hearUsExplanation": "Wir tracken keine App-Installationen. Es würde uns jedoch helfen, wenn du uns mitteilst, wie du von uns erfahren hast!",
"waitingForBrowserRequest": "Warten auf Browseranfrage...",
"launchPasskeyUrlAgain": "Passwort-URL erneut starten",
"passkey": "Passkey",
"developerSettingsWarning": "Sind Sie sicher, dass Sie die Entwicklereinstellungen ändern möchten?",
"developerSettings": "Entwicklereinstellungen",

View file

@ -145,6 +145,7 @@
"lostDeviceTitle": "デバイスを紛失しましたか?",
"twoFactorAuthTitle": "2 要素認証",
"passkeyAuthTitle": "パスキー認証",
"verifyPasskey": "パスキーの認証",
"recoverAccount": "アカウントを回復",
"enterRecoveryKeyHint": "回復キーを入力",
"recover": "回復",
@ -407,7 +408,7 @@
"hearUsWhereTitle": "Ente についてどのようにお聞きになりましたか?(任意)",
"hearUsExplanation": "私たちはアプリのインストールを追跡していません。私たちをお知りになった場所を教えてください!",
"waitingForBrowserRequest": "ブラウザのリクエストを待っています...",
"launchPasskeyUrlAgain": "パスキーのURLを再度起動する",
"waitingForVerification": "認証を待っています...",
"passkey": "パスキー",
"developerSettingsWarning": "開発者向け設定を変更してもよろしいですか?",
"developerSettings": "開発者向け設定",

View file

@ -145,6 +145,7 @@
"lostDeviceTitle": "Perdeu seu dispositivo?",
"twoFactorAuthTitle": "Autenticação de dois fatores",
"passkeyAuthTitle": "Autenticação via Chave de acesso",
"verifyPasskey": "Verificar chave de acesso",
"recoverAccount": "Recuperar conta",
"enterRecoveryKeyHint": "Digite sua chave de recuperação",
"recover": "Recuperar",
@ -407,7 +408,7 @@
"hearUsWhereTitle": "Como você ouviu sobre o Ente? (opcional)",
"hearUsExplanation": "Não rastreamos instalações do aplicativo. Seria útil se você nos contasse onde nos encontrou!",
"waitingForBrowserRequest": "Aguardando solicitação do navegador...",
"launchPasskeyUrlAgain": "Iniciar a URL de chave de acesso novamente",
"waitingForVerification": "Esperando por verificação...",
"passkey": "Chave de acesso",
"developerSettingsWarning": "Tem certeza de que deseja modificar as configurações de Desenvolvedor?",
"developerSettings": "Configurações de desenvolvedor",

View file

@ -145,6 +145,7 @@
"lostDeviceTitle": "丢失了设备吗?",
"twoFactorAuthTitle": "双因素认证",
"passkeyAuthTitle": "通行密钥认证",
"verifyPasskey": "验证通行密钥",
"recoverAccount": "恢复账户",
"enterRecoveryKeyHint": "输入您的恢复密钥",
"recover": "恢复",
@ -407,7 +408,7 @@
"hearUsWhereTitle": "您是如何知道Ente的 (可选的)",
"hearUsExplanation": "我们不跟踪应用程序安装情况。如果您告诉我们您是在哪里找到我们的,将会有所帮助!",
"waitingForBrowserRequest": "正在等待浏览器请求...",
"launchPasskeyUrlAgain": "再次启动 通行密钥 URL",
"waitingForVerification": "等待验证...",
"passkey": "通行密钥",
"developerSettingsWarning": "您确定要修改开发者设置吗?",
"developerSettings": "开发者设置",

1
cli/.gitattributes vendored Normal file
View file

@ -0,0 +1 @@
docs/generated/*.md linguist-generated=true

View file

@ -29,6 +29,9 @@ var _userDetailsCmd = &cobra.Command{
flags.UserEmail = f.Value.String()
}
})
if flags.UserEmail == "" {
return fmt.Errorf("user email is required")
}
return ctrl.GetUserId(context.Background(), *flags)
},
}
@ -47,14 +50,55 @@ var _disable2faCmd = &cobra.Command{
flags.UserEmail = f.Value.String()
}
})
fmt.Println("Not supported yet")
return nil
if flags.UserEmail == "" {
return fmt.Errorf("user email is required")
}
return ctrl.Disable2FA(context.Background(), *flags)
},
}
// _deleteUser is the `admin delete-user` subcommand: it deletes the account
// identified by the --user flag, acting as the --admin-user account.
var _deleteUser = &cobra.Command{
	Use:   "delete-user",
	Short: "Delete a user",
	RunE: func(cmd *cobra.Command, args []string) error {
		recoverWithLog()
		action := &model.AdminActionForUser{}
		// Copy the relevant flag values into the action struct.
		cmd.Flags().VisitAll(func(flag *pflag.Flag) {
			switch flag.Name {
			case "admin-user":
				action.AdminEmail = flag.Value.String()
			case "user":
				action.UserEmail = flag.Value.String()
			}
		})
		// The target user is mandatory; refuse to proceed without it.
		if action.UserEmail == "" {
			return fmt.Errorf("user email is required")
		}
		return ctrl.DeleteUser(context.Background(), *action)
	},
}
// _listUsers is the `admin list-users` subcommand: it prints every user known
// to the server, acting as the --admin-user account.
var _listUsers = &cobra.Command{
	Use:   "list-users",
	Short: "List all users",
	RunE: func(cmd *cobra.Command, args []string) error {
		recoverWithLog()
		action := model.AdminActionForUser{}
		// Only the admin email is relevant here; there is no target user.
		cmd.Flags().VisitAll(func(flag *pflag.Flag) {
			if flag.Name == "admin-user" {
				action.AdminEmail = flag.Value.String()
			}
		})
		return ctrl.ListUsers(context.Background(), action)
	},
}
var _updateFreeUserStorage = &cobra.Command{
Use: "update-subscription",
Short: "Update subscription for the free user",
Short: "Update subscription for user",
Long: "Update subscription for the free user. If you want to apply specific limits, use the `--no-limit False` flag",
RunE: func(cmd *cobra.Command, args []string) error {
recoverWithLog()
var flags = &model.AdminActionForUser{}
@ -70,6 +114,9 @@ var _updateFreeUserStorage = &cobra.Command{
noLimit = strings.ToLower(f.Value.String()) == "true"
}
})
if flags.UserEmail == "" {
return fmt.Errorf("user email is required")
}
return ctrl.UpdateFreeStorage(context.Background(), *flags, noLimit)
},
}
@ -78,13 +125,16 @@ func init() {
rootCmd.AddCommand(_adminCmd)
_ = _userDetailsCmd.MarkFlagRequired("admin-user")
_ = _userDetailsCmd.MarkFlagRequired("user")
_userDetailsCmd.Flags().StringP("admin-user", "a", "", "The email of the admin user. (required)")
_userDetailsCmd.Flags().StringP("admin-user", "a", "", "The email of the admin user. ")
_userDetailsCmd.Flags().StringP("user", "u", "", "The email of the user to fetch details for. (required)")
_disable2faCmd.Flags().StringP("admin-user", "a", "", "The email of the admin user. (required)")
_listUsers.Flags().StringP("admin-user", "a", "", "The email of the admin user. ")
_disable2faCmd.Flags().StringP("admin-user", "a", "", "The email of the admin user. ")
_disable2faCmd.Flags().StringP("user", "u", "", "The email of the user to disable 2FA for. (required)")
_updateFreeUserStorage.Flags().StringP("admin-user", "a", "", "The email of the admin user. (required)")
_deleteUser.Flags().StringP("admin-user", "a", "", "The email of the admin user. ")
_deleteUser.Flags().StringP("user", "u", "", "The email of the user to delete. (required)")
_updateFreeUserStorage.Flags().StringP("admin-user", "a", "", "The email of the admin user.")
_updateFreeUserStorage.Flags().StringP("user", "u", "", "The email of the user to update subscription for. (required)")
// add a flag with no value --no-limit
_updateFreeUserStorage.Flags().String("no-limit", "True", "When true, sets 100TB as storage limit, and expiry to current date + 100 years")
_adminCmd.AddCommand(_userDetailsCmd, _disable2faCmd, _updateFreeUserStorage)
_adminCmd.AddCommand(_userDetailsCmd, _disable2faCmd, _updateFreeUserStorage, _listUsers, _deleteUser)
}

View file

@ -25,4 +25,4 @@ ente [flags]
* [ente export](ente_export.md) - Starts the export process
* [ente version](ente_version.md) - Prints the current version
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ Manage account settings
* [ente account list](ente_account_list.md) - list configured accounts
* [ente account update](ente_account_update.md) - Update an existing account's export directory
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ ente account add [flags]
* [ente account](ente_account.md) - Manage account settings
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -18,4 +18,4 @@ ente account get-token [flags]
* [ente account](ente_account.md) - Manage account settings
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ ente account list [flags]
* [ente account](ente_account.md) - Manage account settings
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -19,4 +19,4 @@ ente account update [flags]
* [ente account](ente_account.md) - Manage account settings
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -15,8 +15,10 @@ Commands for admin actions like disable or enabling 2fa, bumping up the storage
### SEE ALSO
* [ente](ente.md) - CLI tool for exporting your photos from ente.io
* [ente admin delete-user](ente_admin_delete-user.md) - Delete a user
* [ente admin disable-2fa](ente_admin_disable-2fa.md) - Disable 2fa for a user
* [ente admin get-user-id](ente_admin_get-user-id.md) - Get user id
* [ente admin update-subscription](ente_admin_update-subscription.md) - Update subscription for the free user
* [ente admin list-users](ente_admin_list-users.md) - List all users
* [ente admin update-subscription](ente_admin_update-subscription.md) - Update subscription for user
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -0,0 +1,21 @@
## ente admin delete-user
Delete a user
```
ente admin delete-user [flags]
```
### Options
```
-a, --admin-user string The email of the admin user.
-h, --help help for delete-user
-u, --user string The email of the user to delete. (required)
```
### SEE ALSO
* [ente admin](ente_admin.md) - Commands for admin actions
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -9,7 +9,7 @@ ente admin disable-2fa [flags]
### Options
```
-a, --admin-user string The email of the admin user. (required)
-a, --admin-user string The email of the admin user.
-h, --help help for disable-2fa
-u, --user string The email of the user to disable 2FA for. (required)
```
@ -18,4 +18,4 @@ ente admin disable-2fa [flags]
* [ente admin](ente_admin.md) - Commands for admin actions
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -9,7 +9,7 @@ ente admin get-user-id [flags]
### Options
```
-a, --admin-user string The email of the admin user. (required)
-a, --admin-user string The email of the admin user.
-h, --help help for get-user-id
-u, --user string The email of the user to fetch details for. (required)
```
@ -18,4 +18,4 @@ ente admin get-user-id [flags]
* [ente admin](ente_admin.md) - Commands for admin actions
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -0,0 +1,20 @@
## ente admin list-users
List all users
```
ente admin list-users [flags]
```
### Options
```
-a, --admin-user string The email of the admin user.
-h, --help help for list-users
```
### SEE ALSO
* [ente admin](ente_admin.md) - Commands for admin actions
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -1,6 +1,10 @@
## ente admin update-subscription
Update subscription for the free user
Update subscription for user
### Synopsis
Update subscription for the free user. If you want to apply specific limits, use the `--no-limit False` flag
```
ente admin update-subscription [flags]
@ -9,7 +13,7 @@ ente admin update-subscription [flags]
### Options
```
-a, --admin-user string The email of the admin user. (required)
-a, --admin-user string The email of the admin user.
-h, --help help for update-subscription
--no-limit string When true, sets 100TB as storage limit, and expiry to current date + 100 years (default "True")
-u, --user string The email of the user to update subscription for. (required)
@ -19,4 +23,4 @@ ente admin update-subscription [flags]
* [ente admin](ente_admin.md) - Commands for admin actions
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -13,4 +13,4 @@ Authenticator commands
* [ente](ente.md) - CLI tool for exporting your photos from ente.io
* [ente auth decrypt](ente_auth_decrypt.md) - Decrypt authenticator export
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ ente auth decrypt [input] [output] [flags]
* [ente auth](ente_auth.md) - Authenticator commands
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ ente export [flags]
* [ente](ente.md) - CLI tool for exporting your photos from ente.io
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -16,4 +16,4 @@ ente version [flags]
* [ente](ente.md) - CLI tool for exporting your photos from ente.io
###### Auto generated by spf13/cobra on 13-Mar-2024
###### Auto generated by spf13/cobra on 14-Mar-2024

View file

@ -25,6 +25,69 @@ func (c *Client) GetUserIdFromEmail(ctx context.Context, email string) (*models.
}
return &res, nil
}
// ListUsers fetches all users from the server's admin endpoint (from the
// beginning of time, i.e. sinceTime=0).
func (c *Client) ListUsers(ctx context.Context) ([]models.User, error) {
	var body struct {
		Users []models.User `json:"users"`
	}
	resp, err := c.restClient.R().
		SetContext(ctx).
		SetQueryParam("sinceTime", "0").
		SetResult(&body).
		Get("/admin/users/")
	if err != nil {
		return nil, err
	}
	if resp.IsError() {
		// Non-2xx response: surface the status and raw body to the caller.
		return nil, &ApiError{
			StatusCode: resp.StatusCode(),
			Message:    resp.String(),
		}
	}
	return body.Users, nil
}
// DeleteUser asks the server (admin endpoint) to delete the account
// identified by the given email.
func (c *Client) DeleteUser(ctx context.Context, email string) error {
	resp, err := c.restClient.R().
		SetContext(ctx).
		SetQueryParam("email", email).
		Delete("/admin/user/delete")
	if err != nil {
		return err
	}
	if !resp.IsError() {
		return nil
	}
	// Non-2xx response: surface the status and raw body to the caller.
	return &ApiError{
		StatusCode: resp.StatusCode(),
		Message:    resp.String(),
	}
}
// Disable2Fa turns off two-factor authentication for the user with the given
// numeric ID via the server's admin endpoint.
func (c *Client) Disable2Fa(ctx context.Context, userID int64) error {
	var result interface{}
	resp, err := c.restClient.R().
		SetContext(ctx).
		SetResult(&result).
		SetBody(map[string]interface{}{"userID": userID}).
		Post("/admin/user/disable-2fa")
	if err != nil {
		return err
	}
	if !resp.IsError() {
		return nil
	}
	// Non-2xx response: surface the status and raw body to the caller.
	return &ApiError{
		StatusCode: resp.StatusCode(),
		Message:    resp.String(),
	}
}
func (c *Client) UpdateFreePlanSub(ctx context.Context, userDetails *models.UserDetails, storageInBytes int64, expiryTimeInMicro int64) error {
var res interface{}
if userDetails.Subscription.ProductID != "free" {

View file

@ -1,9 +1,7 @@
package models
type UserDetails struct {
User struct {
ID int64 `json:"id"`
} `json:"user"`
User User `json:"user"`
Usage int64 `json:"usage"`
Email string `json:"email"`
@ -14,3 +12,10 @@ type UserDetails struct {
PaymentProvider string `json:"paymentProvider"`
} `json:"subscription"`
}
// User is the server-side representation of an account, as returned by the
// admin endpoints.
type User struct {
	// NOTE(review): ID has no json tag — decoding the server's "id" key still
	// works because encoding/json matches field names case-insensitively, but
	// an explicit `json:"id"` tag would make the intent clearer; confirm
	// nothing relies on the marshaled "ID" key before adding one.
	ID int64
	Email string `json:"email"`
	Hash string `json:"hash"`
	// CreationTime is an epoch timestamp; callers format it via
	// time.UnixMicro, so it is presumably microseconds — TODO confirm.
	CreationTime int64 `json:"creationTime"`
}

View file

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"github.com/ente-io/cli/internal"
"github.com/ente-io/cli/internal/api"
"github.com/ente-io/cli/pkg/model"
"github.com/ente-io/cli/utils"
"log"
@ -24,6 +25,63 @@ func (c *ClICtrl) GetUserId(ctx context.Context, params model.AdminActionForUser
return nil
}
// ListUsers prints the email, ID and creation date of every user on the
// server, acting as the admin account resolved from params.AdminEmail.
func (c *ClICtrl) ListUsers(ctx context.Context, params model.AdminActionForUser) error {
	adminCtx, err := c.buildAdminContext(ctx, params.AdminEmail)
	if err != nil {
		return err
	}
	users, err := c.Client.ListUsers(adminCtx)
	if err != nil {
		// A stale admin token comes back as a 400; prompt the user to
		// re-authenticate instead of surfacing the raw error.
		apiErr, isAPIErr := err.(*api.ApiError)
		if isAPIErr && apiErr.StatusCode == 400 && strings.Contains(apiErr.Message, "Token is too old") {
			fmt.Printf("Error: old admin token, please re-authenticate using `ente account add` \n")
			return nil
		}
		return err
	}
	for _, u := range users {
		created := time.UnixMicro(u.CreationTime).Format("2006-01-02")
		fmt.Printf("Email: %s, ID: %d, Created: %s\n", u.Email, u.ID, created)
	}
	return nil
}
// DeleteUser deletes the account identified by params.UserEmail, acting as
// the admin account resolved from params.AdminEmail.
func (c *ClICtrl) DeleteUser(ctx context.Context, params model.AdminActionForUser) error {
	adminCtx, ctxErr := c.buildAdminContext(ctx, params.AdminEmail)
	if ctxErr != nil {
		return ctxErr
	}
	if err := c.Client.DeleteUser(adminCtx, params.UserEmail); err != nil {
		// A stale admin token comes back as a 400; prompt the user to
		// re-authenticate instead of surfacing the raw error.
		apiErr, isAPIErr := err.(*api.ApiError)
		if isAPIErr && apiErr.StatusCode == 400 && strings.Contains(apiErr.Message, "Token is too old") {
			fmt.Printf("Error: old admin token, please re-authenticate using `ente account add` \n")
			return nil
		}
		return err
	}
	fmt.Println("Successfully deleted user")
	return nil
}
// Disable2FA switches off two-factor authentication for the user identified
// by params.UserEmail, acting as the admin resolved from params.AdminEmail.
func (c *ClICtrl) Disable2FA(ctx context.Context, params model.AdminActionForUser) error {
	adminCtx, err := c.buildAdminContext(ctx, params.AdminEmail)
	if err != nil {
		return err
	}
	// The disable endpoint takes a numeric user ID, so resolve the email first.
	details, err := c.Client.GetUserIdFromEmail(adminCtx, params.UserEmail)
	if err != nil {
		return err
	}
	if err := c.Client.Disable2Fa(adminCtx, details.User.ID); err != nil {
		// A stale admin token comes back as a 400; prompt the user to
		// re-authenticate instead of surfacing the raw error.
		apiErr, isAPIErr := err.(*api.ApiError)
		if isAPIErr && apiErr.StatusCode == 400 && strings.Contains(apiErr.Message, "Token is too old") {
			fmt.Printf("Error: Old admin token, please re-authenticate using `ente account add` \n")
			return nil
		}
		return err
	}
	fmt.Println("Successfully disabled 2FA for user")
	return nil
}
func (c *ClICtrl) UpdateFreeStorage(ctx context.Context, params model.AdminActionForUser, noLimit bool) error {
accountCtx, err := c.buildAdminContext(ctx, params.AdminEmail)
if err != nil {
@ -82,6 +140,9 @@ func (c *ClICtrl) buildAdminContext(ctx context.Context, adminEmail string) (con
if err != nil {
return nil, err
}
if len(accounts) == 0 {
return nil, fmt.Errorf("no accounts found, use `account add` to add an account")
}
var acc *model.Account
for _, a := range accounts {
if a.Email == adminEmail {
@ -89,6 +150,14 @@ func (c *ClICtrl) buildAdminContext(ctx context.Context, adminEmail string) (con
break
}
}
if (len(accounts) > 1) && (acc == nil) {
return nil, fmt.Errorf("multiple accounts found, specify the admin email using --admin-user")
}
if acc == nil && len(accounts) == 1 {
acc = &accounts[0]
fmt.Printf("Assuming %s as the Admin \n------------\n", acc.Email)
}
if acc == nil {
return nil, fmt.Errorf("account not found for %s, use `account list` to list accounts", adminEmail)
}

92
desktop/docs/release.md Normal file
View file

@ -0,0 +1,92 @@
## Releases
> [!NOTE]
>
> TODO(MR): This document needs to be audited and changed as we do the first
> release from this new monorepo.
The Github Action that builds the desktop binaries is triggered by pushing a tag
matching the pattern `photos-desktop-v1.2.3`. This value should match the
version in `package.json`.
So the process for doing a release would be.
1. Create a new branch (can be named anything). On this branch, include your
changes.
2. Mention the changes in `CHANGELOG.md`.
3. Changing the `version` in `package.json` to `1.x.x`.
4. Commit and push to remote
```sh
git add package.json && git commit -m 'Release v1.x.x'
git tag v1.x.x
git push && git push --tags
```
This by itself will already trigger a new release. The GitHub action will create
a new draft release that can then be used as described below.
To wrap up, we also need to merge back these changes into main. So for that,
5. Open a PR for the branch that we're working on (where the above tag was
pushed from) to get it merged into main.
6. In this PR, also increase the version number for the next release train. That
is, supposed we just released `v4.0.1`. Then we'll change the version number
in main to `v4.0.2-next.0`. Each pre-release will modify the `next.0` part.
Finally, at the time of the next release, this'll become `v4.0.2`.
The GitHub Action runs on Windows, Linux and macOS. It produces the artifacts
defined in the `build` value in `package.json`.
* Windows - An NSIS installer.
* Linux - An AppImage, and 3 other packages (`.rpm`, `.deb`, `.pacman`)
* macOS - A universal DMG
Additionally, the GitHub action notarizes the macOS DMG. For this it needs
credentials provided via GitHub secrets.
During the build the Sentry webpack plugin checks to see if SENTRY_AUTH_TOKEN is
defined. If so, it uploads the sourcemaps for the renderer process to Sentry
(For our GitHub action, the SENTRY_AUTH_TOKEN is defined as a GitHub secret).
The sourcemaps for the main (node) process are currently not sent to Sentry
(this works fine in practice since the node process files are not minified, we
only run `tsc`).
Once the build is done, a draft release with all these artifacts attached is
created. The build is idempotent, so if something goes wrong and we need to
re-run the GitHub action, just delete the draft release (if it got created) and
start a new run by pushing a new tag (if some code changes are required).
If no code changes are required, say the build failed for some transient network
or Sentry issue, we can re-run the build by going to the GitHub Actions page and
re-running it from there. This will re-trigger the build for the same tag.
If everything goes well, we'll have a release on GitHub, and the corresponding
source maps for the renderer process uploaded to Sentry. There isn't anything
else to do:
* The website automatically redirects to the latest release on GitHub when
people try to download.
* The file formats that support auto update (Windows `exe`, the Linux AppImage
and the macOS DMG) also check the latest GitHub release automatically to
download and apply the update (the rest of the formats don't support auto
updates).
* We're not putting the desktop app in other stores currently. It is available
as a `brew cask`, but we only had to open a PR to add the initial formula, now
their maintainers automatically bump the SHA, version number and the (derived
from the version) URL in the formula when their tools notice a new release on
our GitHub.
We can also publish the draft releases by checking the "pre-release" option.
Such releases don't cause any of the channels (our website, or the desktop app
auto updater, or brew) to be notified, instead these are useful for giving links
to pre-release builds to customers. Generally, in the version number for these
we'll add a label to the version, e.g. the "beta.x" in `1.x.x-beta.x`. This
should be done both in `package.json`, and what we tag the commit with.

View file

@ -51,7 +51,9 @@ following in your terminal:
Assuming the filename of the binary remains unmodified and the working directory
of the terminal is the location of the binary, you should type this for MacOS:
> [!NOTE] On Apple Silicon devices, Rosetta 2 may be required to run the binary.
> [!NOTE]
>
> On Apple Silicon devices, Rosetta 2 may be required to run the binary.
```
./authy-export-darwin-amd64 authy_codes.txt
@ -91,10 +93,12 @@ to ente Authenticator!
### Method 2.1: If the export worked, but the import didn't
> [!NOTE] This is intended only for users who successfully exported their codes
> using the guide in method 2, but could not import it to ente Authenticator for
> whatever reason. If the import was successful, or you haven't tried to import
> the codes yet, ignore this section.
> [!NOTE]
>
> This is intended only for users who successfully exported their codes using the
> guide in method 2, but could not import it to ente Authenticator for whatever
> reason. If the import was successful, or you haven't tried to import the codes
> yet, ignore this section.
>
> If the export itself failed, try using
> [**method 1**](#method-1-use-neerajs-export-tool) instead.

View file

@ -17,6 +17,14 @@ have the keys and secrets for the S3 bucket. The plan is as follows:
4. Create an account and increase storage quota
5. Fix potential CORS issue with your bucket
> [!NOTE]
>
> This is a community contributed guide, and some of these steps might be out of
> sync with the upstream documentation. If something is not working correctly,
> please also see the latest
> [READMEs](https://github.com/ente-io/ente/blob/main/server/README.md) in the
> repository and/or other guides in [self-hosting](/self-hosting/).
## 1. Create a `compose.yaml` file
After cloning the main repository with
@ -25,6 +33,7 @@ After cloning the main repository with
git clone https://github.com/ente-io/ente.git
# Or git clone git@github.com:ente-io/ente.git
cd ente
git submodule update --init --recursive
```
Create a `compose.yaml` file at the root of the project with the following

2
infra/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
# macOS
.DS_Store

8
infra/README.md Normal file
View file

@ -0,0 +1,8 @@
# Infra
Various knick-knacks that we use when hosting our servers.
These are not needed for running Ente's server or for self-hosting, these are
just additional services we run to make our infrastructure more robust. As such,
it's unlikely that you'll find the pieces here directly useful for your needs,
but feel free to have a look around if you're curious!

2
infra/copycat-db/.gitignore vendored Normal file
View file

@ -0,0 +1,2 @@
.DS_Store
copycat-db.env

View file

@ -0,0 +1,34 @@
# Image for the copycat-db backup service: bundles the Scaleway CLI (to take
# managed-DB backups), pg_dump (local testing fallback), jq, and rclone (to
# upload the encrypted dump offsite). Entry point is /backup.sh via tini.
FROM ubuntu:latest

RUN apt-get update && apt-get install -y curl gnupg
# tini is used as PID 1 so that signals are forwarded to the backup script.
RUN apt-get install -y tini

# Install pg_dump (via Postgres client)
# https://www.postgresql.org/download/linux/ubuntu/
#
# We don't need it for production backups, but this is useful for local testing.
RUN \
    apt-get install -y lsb-release && \
    sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list' && \
    curl https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - && \
    apt-get update && \
    apt-get -y install postgresql-client-12

# Install SCW CLI
# Latest release: https://github.com/scaleway/scaleway-cli/releases/latest
RUN \
    export VERSION="2.26.0" && \
    curl -o /usr/local/bin/scw -L "https://github.com/scaleway/scaleway-cli/releases/download/v${VERSION}/scaleway-cli_${VERSION}_linux_amd64" && \
    chmod +x /usr/local/bin/scw

# jq is used by backup.sh to extract IDs from the SCW CLI's JSON output.
RUN apt-get install -y jq

# Install rclone
RUN apt-get install -y unzip
RUN curl https://rclone.org/install.sh | bash

# The backup/restore scripts live in src/ and are copied to the image root.
COPY src /

ENTRYPOINT ["tini", "--"]

CMD [ "/backup.sh" ]

172
infra/copycat-db/README.md Normal file
View file

@ -0,0 +1,172 @@
# Copycat DB
Copycat DB is a [service](../services/README.md) to take a backup of our
database. It uses the Scaleway CLI to take backups of the database, and uploads
them to an offsite bucket.
This bucket has an object lock configured, so backups cannot be deleted before
expiry. Conversely, the service also deletes backups older than some threshold
when it creates a new one to avoid indefinite retention.
In production the service runs as a cron job, scheduled using a systemd timer.
> These backups are in addition to the regular snapshots that we take, and are
> meant as a second layer of replication. For more details, see our
> [Reliability and Replication Specification](https://ente.io/reliability).
## Quick help
View service status (it gets invoked as a timer automatically, doesn't need to
be started/stopped manually):
```sh
sudo systemctl status copycat-db
```
View logs locally (they'll also be available on Grafana):
```sh
sudo tail /root/var/logs/copycat-db.log
```
## Name
The name copycat-db is a riff on "copycat", which is what we call our museum
instance that does the object replication. This one replicates the DB, so,
copycat-db.
## Required environment variables
##### SCW_CONFIG_PATH
Path to the `config.yaml` used by Scaleway CLI.
This contains the credentials and the default region to use when trying to
create and download the database dump.
If needed, this config file can be generated by running the following commands
on a shell prompt in the container (using `./test.sh sh`)
scw init
scw config dump
##### SCW_RDB_INSTANCE_ID
The UUID of the Scaleway RDB instance that we wish to back up. If this is missing,
then the Docker image falls back to using `pg_dump` (as outlined next).
##### PGUSER, PGPASSWORD, PGHOST
Not needed in production when taking a backup (since we use the Scaleway CLI to
take backups in production).
These are used when testing a backup using `pg_dump`, and when restoring
backups.
##### RCLONE_CONFIG
Location of the config file that contains the destination bucket to which the
backups should be saved, and the credentials to access it.
Specifically, the config file contains two remotes:
- The bucket itself, where data will be stored.
- A "crypt" remote that wraps the bucket by applying client side encryption.
The configuration file will contain (lightly) obfuscated versions of the
password, and as long as we have the configuration file we can continue using
rclone to download and decrypt the plaintext. Still, it is helpful to retain the
original password too separately so that the file can be recreated if needed.
A config file can be generated using `./test.sh sh`
rclone config
rclone config show
When generating the config, we keep file (and directory) name encryption off.
Note that rclone creates a backup of the config file, so Docker needs to have
write access to the directory where it is mounted.
##### RCLONE_DESTINATION
Name of the (crypt) remote to which the dump should be saved. Example:
`db-backup-crypt:`.
Note that this will not include the bucket - the bucket name will be part of the
remote that the crypt remote wraps.
##### Logging
The service logs to its standard out/error. The systemd unit is configured to
route these to `/var/logs/copycat-db.log`.
## Local testing
The provided `test.sh` script can be used to do a smoke test for building and
running the image. For example,
./test.sh bin/bash
gives us a shell prompt inside the built and running container.
For more thorough testing, run this service as part of a local test-cluster.
## Restoring
The service also knows how to restore the latest backup into a Postgres
instance. This functionality is used by a separate service (Phoenix) to periodically
verify that the backups are restorable.
To invoke this, use "./restore.sh" as the command when running the container
(e.g. `./test.sh ./restore.sh`). This will restore the latest backup into the
Postgres instance whose credentials are provided via the various `PG*`
environment variables.
## Preparing the bucket
The database dumps are stored in a bucket that has object lock enabled
(compliance mode), and has a default bucket level retention time of 30 days.
## Deploying
Ensure that promtail is running, and is configured to scrape
`/root/var/logs/copycat-db.log`.
Create the config and log destination directories
sudo mkdir -p /root/var/config/scw
sudo mkdir -p /root/var/config/rclone
sudo mkdir -p /root/var/logs
Create the env, scw and rclone configuration files
sudo tee /root/copycat-db.env
sudo tee /root/var/config/scw/copycat-db-config.yaml
sudo tee /root/var/config/rclone/copycat-db-rclone.conf
Add the service definition, and start the service
scp copycat-db.{service,timer} instance:
sudo mv copycat-db.{service,timer} /etc/systemd/system
sudo systemctl daemon-reload
To start the cron job
sudo systemctl start copycat-db.timer
The timer will trigger the service on the specified schedule. In addition, if
you wish to force the service to run immediately
sudo systemctl start copycat-db.service
## Updating
To update, run the
[GitHub workflow](../../.github/workflows/copycat-db-release.yaml) to build and
push the latest image to our Docker Registry, then restart the systemd service
on the instance
sudo systemctl restart copycat-db

View file

@ -0,0 +1,8 @@
SCW_CONFIG_PATH=/var/config/scw/copycat-db-config.yaml
SCW_RDB_INSTANCE_ID=
RCLONE_CONFIG=/var/config/rclone/copycat-db-rclone.conf
RCLONE_DESTINATION=db-backup-crypt:
PGUSER=
PGPASSWORD=
PGHOST=host.docker.internal
PGPORT=

View file

@ -0,0 +1,20 @@
# systemd unit that runs the copycat-db backup container. It is normally
# triggered by copycat-db.timer rather than started manually.
[Unit]
Documentation=https://github.com/ente-io/ente/blob/main/infra/copycat-db
Requires=docker.service
After=docker.service

[Service]
Restart=always
RestartSec=3600s
# Don't automatically restart if it fails more than 6 times in 24 hours.
StartLimitInterval=86400
StartLimitBurst=6
# Pull the latest image and remove any stale container before each run.
ExecStartPre=docker pull rg.fr-par.scw.cloud/ente/copycat-db
ExecStartPre=-docker stop copycat-db
ExecStartPre=-docker rm copycat-db
ExecStart=docker run --name copycat-db \
    --env-file /root/copycat-db.env \
    -v /root/var:/var \
    rg.fr-par.scw.cloud/ente/copycat-db
# Append output to a log file that promtail scrapes (see README).
StandardOutput=append:/root/var/logs/copycat-db.log
StandardError=inherit

View file

@ -0,0 +1,8 @@
[Unit]
Description=Schedule copycat-db
[Timer]
OnCalendar=Daily
[Install]
WantedBy=timers.target

50
infra/copycat-db/src/backup.sh Executable file
View file

@ -0,0 +1,50 @@
#!/bin/bash
set -o errexit
set -o xtrace
NOWS="$(date +%s)"
BACKUP_FILE="db-$NOWS.custom"
# Scaleway backup names cannot contain dots
BACKUP_NAME="db-$NOWS-custom"
# Calculate an expiry time 1 month from now
EXPIRYS="$(( 30 * 24 * 60 * 60 + $NOWS ))"
# Convert it to the ISO 8601 format that SCW CLI understands
# Note that GNU date uses "-d" and an "@" to pass an epoch (macOS uses "-r").
EXPIRY="$(date -Iseconds --utc --date "@$EXPIRYS")"
if test -z "$SCW_RDB_INSTANCE_ID"
then
# A required SCW related environment variable hasn't been specified. This is
# expected when running the script locally for testing. Fallback to using
# pg_dump for creating the backup.
pg_dump -Fc ente_db > $BACKUP_FILE
else
# We need to export a backup first after creating it, before it can be
# downloaded.
#
# Further, our backups currently take longer than the default 20 minute
# timeout for the export set by Scaleway, and end up failing:
#
# {"error":"scaleway-sdk-go: waiting for database backup failed: timeout after 20m0s"}
#
# To avoid this we need to add a custom wait here ourselves instead of using
# the convenience `--wait` flag for the export command provided by Scaleway.
BACKUP_ID=$(scw rdb backup create instance-id=$SCW_RDB_INSTANCE_ID \
name=$BACKUP_NAME expires-at=$EXPIRY \
database-name=ente_db -o json | jq -r '.id')
scw rdb backup wait $BACKUP_ID timeout=5h
scw rdb backup download output=$BACKUP_FILE \
$(scw rdb backup export $BACKUP_ID --wait -o json | jq -r '.id')
fi
rclone copy --log-level INFO $BACKUP_FILE $RCLONE_DESTINATION
# Delete older backups
rclone delete --log-level INFO --min-age 30d $RCLONE_DESTINATION
set +o xtrace
echo "copycat-db: backup complete: $BACKUP_FILE"

42
infra/copycat-db/src/restore.sh Executable file
View file

@ -0,0 +1,42 @@
#!/bin/bash
set -o errexit
set -o xtrace
# Find the name of the latest backup
# The backup file name contains the epoch, so we can just sort.
BACKUP_FILE=$(rclone lsf --include 'db-*.custom' --files-only $RCLONE_DESTINATION | sort | tail -1)
# Download it
rclone copy --log-level INFO "${RCLONE_DESTINATION}${BACKUP_FILE}" .
# Restore from it
#
# This create a database named rdb on Postgres - this is only used for the
# initial connection, the actual ente_db database will be created once the
# restore starts.
#
# Flags:
#
# * no-owner: recreates the schema using the current user, not the one that was
# used for the export.
#
# * no-privileges: skip the assignment of roles (this way we do not have to
# recreate all the users from the original database before proceeding with the
# restore)
createdb rdb || true
pg_restore -d rdb --create --no-privileges --no-owner --exit-on-error "$BACKUP_FILE"
# Delete any tokens that were in the backup
psql -d ente_db -c 'delete from tokens'
# Delete any push tokens that were in the backup
psql -d ente_db -c 'delete from push_tokens'
# Delete some more temporary data that might've come up in the backup
psql -d ente_db -c 'delete from queue'
psql -d ente_db -c 'delete from temp_objects'
set +o xtrace
echo "copycat-db: restore complete: $BACKUP_FILE"

20
infra/copycat-db/test.sh Executable file
View file

@ -0,0 +1,20 @@
#!/bin/bash
set -o xtrace
set -o errexit
PROJECT=copycat-db
docker rmi "ente/$PROJECT" || true
docker build --tag "ente/$PROJECT" .
# Interactively run the container.
#
# By passing "$@", we allow any arguments passed to test.sh to be forwarded to
# the image (useful for testing out things, e.g. `./test.sh sh`).
docker run \
--interactive --tty --rm \
--env-file copycat-db.env \
--name "$PROJECT" \
"ente/$PROJECT" \
"$@"

104
infra/services/README.md Normal file
View file

@ -0,0 +1,104 @@
# Services
"Services" are Docker images we run on our instances and manage using systemd.
All our services (including museum itself) follow the same pattern:
- They're run on vanilla Ubuntu instances. The only expectation they have is
for Docker to be installed.
- They log to fixed, known, locations - `/root/var/log/foo.log` - so that
these logs can get ingested by Promtail if needed.
- Each service should consist of a Docker image (or a Docker compose file),
and a systemd unit file.
- To start / stop / schedule the service, we use systemd.
- Each time the service runs it should pull the latest Docker image, so there
is no separate installation/upgrade step needed. We can just restart the
service, and it'll use the latest code.
- Any credentials and/or configuration should be read by mounting the
appropriate file from `/root/service-name` into the running Docker
container.
## Systemd cheatsheet
```sh
sudo systemctl status my-service
sudo systemctl start my-service
sudo systemctl stop my-service
sudo systemctl restart my-service
sudo journalctl --unit my-service
```
## Adding a service
Create a systemd unit file (See the various `*.service` files in this repository
for examples).
If we want the service to start on boot, add an `[Install]` section to its
service file (_note_: starting on boot requires one more step later):
```
[Install]
WantedBy=multi-user.target
```
Copy the service file to the instance where we want to run the service. Services
might also have some additional configuration or env files, also copy those to
the instance.
```sh
scp services/example.service example.env <instance>:
```
SSH into the instance.
```sh
ssh <instance>
```
Move the service file to `/etc/systemd/system`, and any config files to their
expected place. env and other config files that contain credentials are kept in `/root`.
```sh
sudo mv example.service /etc/systemd/system
sudo mv example.env /root
```
If you want to start the service on boot (as spoken of in the `[Install]`
section above), then enable it (this only needs to be done once):
```sh
sudo systemctl enable example
```
Reload systemd so that it gets to know of the service.
```sh
sudo systemctl daemon-reload
```
Now you can manage the service using standard systemd commands.
```sh
sudo systemctl start example
```
To view stdout/err, use:
```sh
sudo journalctl --follow --unit example
```
## Logging
Services should log to files in `/var/logs` within the container. This should be
mounted to `/root/var/logs` on the instance (using the `-v` flag in the service
file which launches the Docker container or the Docker compose cluster).
If these logs need to be sent to Grafana, then ensure that there is an entry for
this log file in the `promtail/promtail.yaml` on that instance. The logs will
then get scraped by Promtail and sent over to Grafana.

View file

@ -0,0 +1,32 @@
# Prometheus
Install `prometheus.service` on an instance if it is running something that
exports custom Prometheus metrics. In particular, museum does.
Also install `node-exporter.service` (after installing
[node-exporter](https://prometheus.io/docs/guides/node-exporter/) itself) if it
is a production instance whose metrics (CPU, disk, RAM etc) we want to monitor.
## Installing
Prometheus doesn't currently support environment variables in config file, so
remember to change the hardcoded `XX-HOSTNAME` too in addition to adding the
`remote_write` configuration.
```sh
scp -P 7426 services/prometheus/* <instance>:
nano prometheus.yml
sudo mv prometheus.yml /root/prometheus.yml
sudo mv prometheus.service /etc/systemd/system/prometheus.service
sudo mv node-exporter.service /etc/systemd/system/node-exporter.service
```
Tell systemd to pick up new service definitions, enable the units (so that they
automatically start on boot going forward), and start them.
```sh
sudo systemctl daemon-reload
sudo systemctl enable --now node-exporter
sudo systemctl enable --now prometheus
```

View file

@ -0,0 +1,12 @@
[Unit]
Documentation=https://prometheus.io/docs/guides/node-exporter/
Wants=network-online.target
After=network-online.target
[Install]
WantedBy=multi-user.target
[Service]
User=node_exporter
Group=node_exporter
ExecStart=/usr/local/bin/node_exporter

View file

@ -0,0 +1,16 @@
[Unit]
Documentation=https://prometheus.io/docs/prometheus/
Requires=docker.service
After=docker.service
[Install]
WantedBy=multi-user.target
[Service]
ExecStartPre=docker pull prom/prometheus
ExecStartPre=-docker stop prometheus
ExecStartPre=-docker rm prometheus
ExecStart=docker run --name prometheus \
--add-host=host.docker.internal:host-gateway \
-v /root/prometheus.yml:/etc/prometheus/prometheus.yml:ro \
prom/prometheus

View file

@ -0,0 +1,39 @@
# https://prometheus.io/docs/prometheus/latest/configuration/
global:
scrape_interval: 30s # Default is 1m
scrape_configs:
- job_name: museum
static_configs:
- targets: ["host.docker.internal:2112"]
relabel_configs:
- source_labels: [__address__]
regex: ".*"
target_label: instance
replacement: XX-HOSTNAME
- job_name: "prometheus"
static_configs:
- targets: ["localhost:9090"]
relabel_configs:
- source_labels: [__address__]
regex: ".*"
target_label: instance
replacement: XX-HOSTNAME
- job_name: "node"
static_configs:
- targets: ["host.docker.internal:9100"]
relabel_configs:
- source_labels: [__address__]
regex: ".*"
target_label: instance
replacement: XX-HOSTNAME
# Grafana Cloud
remote_write:
- url: https://g/api/prom/push
basic_auth:
username: foo
password: bar

View file

@ -0,0 +1,26 @@
# Promtail
Install `promtail.service` on an instance if it is running something whose logs
we want in Grafana.
## Installing
Replace `client.url` in the config file with the Loki URL that Promtail should
connect to, and move the files to their expected place.
```sh
scp -P 7426 services/promtail/* <instance>:
nano promtail.yaml
sudo mv promtail.yaml /root/promtail.yaml
sudo mv promtail.service /etc/systemd/system/promtail.service
```
Tell systemd to pick up new service definitions, enable the unit (so that it
automatically starts on boot), and start it this time around.
```sh
sudo systemctl daemon-reload
sudo systemctl enable promtail
sudo systemctl start promtail
```

View file

@ -0,0 +1,19 @@
[Unit]
Documentation=https://grafana.com/docs/loki/latest/clients/promtail/
Requires=docker.service
After=docker.service
[Install]
WantedBy=multi-user.target
[Service]
ExecStartPre=docker pull grafana/promtail
ExecStartPre=-docker stop promtail
ExecStartPre=-docker rm promtail
ExecStart=docker run --name promtail \
--hostname "%H" \
-v /root/promtail.yaml:/config.yaml:ro \
-v /var/log:/var/log \
-v /root/var/logs:/var/logs:ro \
-v /var/lib/docker/containers:/var/lib/docker/containers:ro \
grafana/promtail -config.file=/config.yaml -config.expand-env=true

View file

@ -0,0 +1,45 @@
# https://grafana.com/docs/loki/latest/clients/promtail/configuration/
# We don't want Promtail's HTTP / GRPC server.
server:
disable: true
# Loki URL
# For Grafana Cloud, it can be found in the integrations section.
clients:
- url: http://loki:3100/loki/api/v1/push
# Manually add entries for all our services. This is a bit cumbersome, but
# - Retains flexibility in file names.
# - Makes adding job labels easy.
# - Does not get in the way of logrotation.
#
# In addition, also scrape logs from all docker containers.
scrape_configs:
- job_name: museum
static_configs:
- labels:
job: museum
host: ${HOSTNAME}
__path__: /var/logs/museum.log
- job_name: copycat-db
static_configs:
- labels:
job: copycat-db
host: ${HOSTNAME}
__path__: /var/logs/copycat-db.log
- job_name: phoenix
static_configs:
- labels:
job: phoenix
host: ${HOSTNAME}
__path__: /var/logs/phoenix.log
- job_name: docker
static_configs:
- labels:
job: docker
host: ${HOSTNAME}
__path__: /var/lib/docker/containers/*/*-json.log

View file

@ -23,7 +23,8 @@ If you're looking for Ente Auth instead, see [../auth](../auth/README.md).
### Android
The [GitHub releases](https://github.com/ente-io/photos-app/releases) contain
The [GitHub
releases](https://github.com/ente-io/ente/releases?q=tag%3Aphotos-v0) contain
APKs, built straight from source. The latest build is available at
[ente.io/apk](https://ente.io/apk). These builds keep themselves updated,
without relying on third party stores.

View file

@ -27,7 +27,7 @@ FEATURES
- und noch VIELES mehr!
BERECHTIGUNGEN
Diese können unter folgendem Link überprüft werden: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
Diese können unter folgendem Link überprüft werden: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PREIS
Wir bieten keine lebenslang kostenlosen Abonnements an, da es für uns wichtig ist, einen nachhaltigen Service anzubieten. Wir bieten jedoch bezahlbare Abonemments an, welche auch mit der Familie geteilt werden können. Mehr Informationen sind auf ente.io zu finden.

View file

@ -27,7 +27,7 @@ FEATURES
- and a LOT more!
PERMISSIONS
ente requests for certain permissions to serve the purpose of a photo storage provider, which can be reviewed here: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente requests for certain permissions to serve the purpose of a photo storage provider, which can be reviewed here: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PRICING
We don't offer forever free plans, because it is important to us that we remain sustainable and withstand the test of time. Instead we offer affordable plans that you can freely share with your family. You can find more information at ente.io.

View file

@ -27,7 +27,7 @@ CARACTERÍSTICAS
- ¡Y mucho más!
PERMISOS
ente solicita ciertos permisos para servir al propósito de un proveedor de almacenamiento de fotos, que puede ser revisado aquí: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente solicita ciertos permisos para servir al propósito de un proveedor de almacenamiento de fotos, que puede ser revisado aquí: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PRECIOS
No ofrecemos planes gratis para siempre, porque es importante para nosotros seguir siendo sostenibles y resistir a la prueba del tiempo. En su lugar, ofrecemos planes asequibles que puedes compartir libremente con tu familia. Puedes encontrar más información en ente.io.

View file

@ -27,7 +27,7 @@ CARACTÉRISTIQUES
- et beaucoup de choses encore !
PERMISSIONS
ente sollicite diverses autorisations dans le but de fonctionner en tant que service de stockage de photos, et ces autorisations sont détaillées ici : https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente sollicite diverses autorisations dans le but de fonctionner en tant que service de stockage de photos, et ces autorisations sont détaillées ici : https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PRIX
Nous ne proposons pas d'abonnement gratuits pour toujours, car il est important pour nous de rester durables et de résister à l'épreuve du temps. Au lieu de cela, nous vous proposons des abonnements abordables que vous pouvez partager librement avec votre famille. Vous pouvez trouver plus d'informations sur ente.io.

View file

@ -27,7 +27,7 @@ ente גם מקל על שיתוף האלבומים שלך עם קרובך, גם
- ועוד הרבה יותר!
הרשאות
ente מבקש הרשאות מסוימות כדי לספק שירותי אחסון תמונות, וניתן לסקור אותן כאן: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente מבקש הרשאות מסוימות כדי לספק שירותי אחסון תמונות, וניתן לסקור אותן כאן: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
מחיר
אנחנו לא מציעים תוכניות בחינם לתמיד, משום שזה חשוב לנו להיות עמידים ולעמוד במבחן הזמן. במקום זאת אנחנו מציעים תוכניות במחיר סביר כדי שתוכל לשתף באופן חופשי עם המשפחה שלך. ניתן למצוא עוד מידע ב-ente.io.

View file

@ -27,7 +27,7 @@ CARATTERISTICHE
- e molto altro ancora!
PERMESSI
ente richiede alcune autorizzazioni per servire lo scopo di un provider di storage fotografico, che può essere esaminato qui: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente richiede alcune autorizzazioni per servire lo scopo di un provider di storage fotografico, che può essere esaminato qui: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PREZZO
Non offriamo piani gratuiti per sempre, perché per noi è importante rimanere sostenibili e resistere alla prova del tempo. Offriamo invece piani accessibili che si possono condividere liberamente con la propria famiglia. Puoi trovare maggiori informazioni su ente.io.

View file

@ -27,7 +27,7 @@ FUNCTIES
- en nog veel meer!
TOESTEMMINGEN
ente heeft bepaalde machtigingen nodig om uw foto's op te slaan, die hier bekeken kunnen worden: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente heeft bepaalde machtigingen nodig om uw foto's op te slaan, die hier bekeken kunnen worden: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PRIJZEN
We bieden geen oneindig gratis plannen aan, omdat het voor ons belangrijk is dat we duurzaam blijven en de tand des tijds weerstaan. In plaats daarvan bieden we betaalbare plannen aan die je vrij kunt delen met je familie. Je kunt meer informatie vinden op ente.io.

View file

@ -27,7 +27,7 @@ RECURSOS
- e MUITO MAIS!
PERMISSÕES
ente solicita certas permissões para servir o propósito de um provedor de armazenamento de fotos, que pode ser revisado aqui: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente solicita certas permissões para servir o propósito de um provedor de armazenamento de fotos, que pode ser revisado aqui: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
PREÇO
Não oferecemos planos gratuitos para sempre, porque é importante para nós que permaneçamos sustentáveis e resistamos à prova do tempo. Em vez disso, oferecemos planos acessíveis que você pode compartilhar livremente com sua família. Você pode encontrar mais informações em ente.io.

View file

@ -27,7 +27,7 @@ ente также делает так, что делится альбомами с
- и ещё МНОГОЕ другое!
РАЗРЕШЕНИЯ
ente просит разрешения на использование хранилища фотографий, которые можно рассмотреть здесь: https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente просит разрешения на использование хранилища фотографий, которые можно рассмотреть здесь: https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
ЦЕНА
Мы не предлагаем бесконечные бесплатные планы, потому что для нас важно оставаться устойчивыми и выдерживать испытание временем. Вместо этого мы предлагаем доступные по цене планы, которыми вы можете свободно делиться с вашей семьей. Дополнительную информацию можно найти на сайте ente.io.

View file

@ -4,7 +4,7 @@ ente 是一个简单的应用程序来备份和分享您的照片和视频。
我们在Android、iOS、web 和桌面上有开源应用, 和您的照片将以端到端加密方式 (e2ee) 无缝同步。
ente也使分享相册给自己的爱人、亲人变得轻而易举即使他们可能并不使用ente。 您可以分享可公开查看的链接使他们可以查看您的相册并通过添加照片来协作而不需要注册账户或下载app。 权限
ente也使分享相册给自己的爱人、亲人变得轻而易举即使他们可能并不使用ente。 您可以分享可公开查看的链接使他们可以查看您的相册并通过添加照片来协作而不需要注册账户或下载app。 ente也使分享相册给自己的爱人、亲人变得轻而易举即使他们可能并不使用ente。 您可以分享可公开查看的链接使他们可以查看您的相册并通过添加照片来协作而不需要注册账户或下载app。 权限
您的加密数据已复制到三个不同的地点,包括巴黎的一个安全屋。 我们认真对待子孙后代,并确保您的回忆比您长寿。 我们认真对待子孙后代,并确保您的回忆比您长寿。
@ -27,10 +27,10 @@ ente也使分享相册给自己的爱人、亲人变得轻而易举即使他
- 还有更多特色待你发现!
权限
ente需要特定权限以执行作为图像存储提供商的职责相关内容可以在此链接查阅https://github.com/ente-io/photos-app/blob/f-droid/android/permissions.md
ente需要特定权限以执行作为图像存储提供商的职责相关内容可以在此链接查阅https://github.com/ente-io/ente/blob/f-droid/mobile/android/permissions.md
价格
我们不会提供永久免费计划,因为我们必须保持可持续性,经受住时间的考验。 相反,我们向您提供了价格实惠、可自由分享的订阅计划。 您可以在 ente.io 找到更多信息。 相反,我们向您提供了价格实惠、可自由分享的订阅计划。 您可以在 ente.io 找到更多信息。
我们不会提供永久免费计划,因为我们必须保持可持续性,经受住时间的考验。 相反,我们向您提供了价格实惠、可自由分享的订阅计划。 您可以在 ente.io 找到更多信息。 相反,我们向您提供了价格实惠、可自由分享的订阅计划。 您可以在 ente.io 找到更多信息。 相反,我们向您提供了价格实惠、可自由分享的订阅计划。 您可以在 ente.io 找到更多信息。
支持
我们对提供真人支持感到自豪。 我们对提供真人支持感到自豪。 如果您是我们的付费客户,您可以联系 team@ente.io 并在24小时内收到来自我们团队的回复。

View file

@ -936,7 +936,7 @@ packages:
path: "."
ref: main
resolved-ref: "49158ce4a517e87817dc84c6b96c00639281229a"
url: "https://github.com/prateekmedia/FlutterHomeWidget"
url: "https://github.com/ente-io/FlutterHomeWidget"
source: git
version: "0.4.1"
html:

View file

@ -93,7 +93,7 @@ dependencies:
google_nav_bar: ^5.0.5
home_widget:
git:
url: https://github.com/prateekmedia/FlutterHomeWidget
url: https://github.com/ente-io/FlutterHomeWidget
ref: main
html_unescape: ^2.0.0
http: ^1.1.0

View file

@ -113,10 +113,7 @@ s3:
#
# 1. Disable SSL.
#
# 2. Use "path" style S3 URLs where the bucket is part of the URL path, e.g.
# http://localhost:3200/b2-eu-cen. By default the bucket name is part of
# the (sub)domain, e.g. http://b2-eu-cen.localhost:3200/ and cannot be
# resolved when running locally.
# 2. Use "path" style S3 URLs (see `use_path_style_urls` below).
#
# 3. Directly download the file during replication instead of going via the
# Cloudflare worker.
@ -125,6 +122,17 @@ s3:
# not support them, specifically it doesn't support GLACIER).
#
#are_local_buckets: true
# Uncomment this to use "path" style S3 URLs.
#
# By default the bucket name is part of the (sub)domain, e.g.
# http://b2-eu-cen.localhost:3200/. If this is true, then we use "path"
# style S3 URLs where the bucket is part of the URL path, e.g.
# http://localhost:3200/b2-eu-cen.
#
# This is useful in scenarios when sub-domain based addressing cannot be
# resolved, e.g. when running a local instance, or when using MinIO as a
# production store.
#use_path_style_urls: true
# Key used for encrypting customer emails before storing them in DB
#
@ -174,7 +182,7 @@ stripe:
webauthn:
rpid: "example.com"
rporigins:
- "https://example.com:3005"
- "https://example.com:3005"
# Roadmap SSO (optional)
#
@ -220,13 +228,17 @@ internal:
# If provided, this external healthcheck url is periodically pinged.
health-check-url:
# Hardcoded verification codes, useful for logging in when developing.
hardcoded-ott:
emails:
- "example@example.org,123456"
# When running in a local environment, hardcode the verification code to
# 123456 for email addresses ending with @example.org
local-domain-suffix: "@example.org"
local-domain-value: 123456
#
# Uncomment this and set these to your email ID or domain so that you don't
# need to peek into the server logs for obtaining the OTP when trying to log
# into an instance you're developing on.
# hardcoded-ott:
# emails:
# - "example@example.org,123456"
# # When running in a local environment, hardcode the verification code to
# # 123456 for email addresses ending with @example.org
# local-domain-suffix: "@example.org"
# local-domain-value: 123456
# List of user IDs that can use the admin API endpoints.
admins: []

View file

@ -104,6 +104,7 @@ func (config *S3Config) initialize() {
config.s3Configs = make(map[string]*aws.Config)
config.s3Clients = make(map[string]s3.S3)
usePathStyleURLs := viper.GetBool("s3.use_path_style_urls")
areLocalBuckets := viper.GetBool("s3.are_local_buckets")
config.areLocalBuckets = areLocalBuckets
@ -116,6 +117,9 @@ func (config *S3Config) initialize() {
Endpoint: aws.String(viper.GetString("s3." + dc + ".endpoint")),
Region: aws.String(viper.GetString("s3." + dc + ".region")),
}
if usePathStyleURLs {
s3Config.S3ForcePathStyle = aws.Bool(true)
}
if areLocalBuckets {
s3Config.DisableSSL = aws.Bool(true)
s3Config.S3ForcePathStyle = aws.Bool(true)

Binary file not shown.

Before

Width:  |  Height:  |  Size: 102 KiB

After

Width:  |  Height:  |  Size: 162 KiB

View file

@ -338,8 +338,8 @@
"SORT_BY_CREATION_TIME_ASCENDING": "Ältestem",
"SORT_BY_UPDATION_TIME_DESCENDING": "Zuletzt aktualisiert",
"SORT_BY_NAME": "Name",
"COMPRESS_THUMBNAILS": "",
"THUMBNAIL_REPLACED": "",
"COMPRESS_THUMBNAILS": "Vorschaubilder komprimieren",
"THUMBNAIL_REPLACED": "Vorschaubilder komprimiert",
"FIX_THUMBNAIL": "Komprimiere",
"FIX_THUMBNAIL_LATER": "Später komprimieren",
"REPLACE_THUMBNAIL_NOT_STARTED": "",
@ -352,7 +352,7 @@
"UPDATE_CREATION_TIME_NOT_STARTED": "",
"UPDATE_CREATION_TIME_COMPLETED": "",
"UPDATE_CREATION_TIME_COMPLETED_WITH_ERROR": "",
"CAPTION_CHARACTER_LIMIT": "",
"CAPTION_CHARACTER_LIMIT": "Maximal 5000 Zeichen",
"DATE_TIME_ORIGINAL": "",
"DATE_TIME_DIGITIZED": "",
"METADATA_DATE": "",

View file

@ -8,7 +8,7 @@
"LOGIN": "Entrar",
"SIGN_UP": "Registrar",
"NEW_USER": "Novo no ente",
"EXISTING_USER": "Utilizador existente",
"EXISTING_USER": "Usuário existente",
"ENTER_NAME": "Insira o nome",
"PUBLIC_UPLOADER_NAME_MESSAGE": "Adicione um nome para que os seus amigos saibam a quem agradecer por estas ótimas fotos!",
"ENTER_EMAIL": "Insira o endereço de e-mail",
@ -227,7 +227,7 @@
"INDEXING_SCHEDULED": "Indexação está programada...",
"ANALYZING_PHOTOS": "Indexando fotos ({{indexStatus.nSyncedFiles,number}} / {{indexStatus.nTotalFiles,number}})",
"INDEXING_PEOPLE": "Indexando pessoas em {{indexStatus.nSyncedFiles,number}} fotos...",
"INDEXING_DONE": "",
"INDEXING_DONE": "Foram indexadas {{indexStatus.nSyncedFiles,number}} fotos",
"UNIDENTIFIED_FACES": "rostos não identificados",
"OBJECTS": "objetos",
"TEXT": "texto",
@ -347,15 +347,15 @@
"REPLACE_THUMBNAIL_NOOP": "Você não tem nenhuma miniatura que possa ser compactadas mais",
"REPLACE_THUMBNAIL_COMPLETED_WITH_ERROR": "Não foi possível compactar algumas das suas miniaturas, por favor tente novamente",
"FIX_CREATION_TIME": "Corrigir hora",
"FIX_CREATION_TIME_IN_PROGRESS": "",
"CREATION_TIME_UPDATED": "",
"FIX_CREATION_TIME_IN_PROGRESS": "Corrigindo horário",
"CREATION_TIME_UPDATED": "Hora do arquivo atualizado",
"UPDATE_CREATION_TIME_NOT_STARTED": "Selecione a carteira que você deseja usar",
"UPDATE_CREATION_TIME_COMPLETED": "Todos os arquivos atualizados com sucesso",
"UPDATE_CREATION_TIME_COMPLETED_WITH_ERROR": "",
"UPDATE_CREATION_TIME_COMPLETED_WITH_ERROR": "A atualização do horário falhou para alguns arquivos, por favor, tente novamente",
"CAPTION_CHARACTER_LIMIT": "5000 caracteres no máximo",
"DATE_TIME_ORIGINAL": "",
"DATE_TIME_DIGITIZED": "",
"METADATA_DATE": "",
"DATE_TIME_ORIGINAL": "Data e Hora Original",
"DATE_TIME_DIGITIZED": "Data e Hora Digitalizada",
"METADATA_DATE": "Data de Metadados",
"CUSTOM_TIME": "Tempo personalizado",
"REOPEN_PLAN_SELECTOR_MODAL": "Reabrir planos",
"OPEN_PLAN_SELECTOR_MODAL_FAILED": "Falha ao abrir planos",
@ -408,7 +408,7 @@
"SHARED_USING": "Compartilhar usando ",
"ENTE_IO": "ente.io",
"SHARING_REFERRAL_CODE": "Use o código <strong>{{referralCode}}</strong> para obter 10 GB de graça",
"LIVE": "",
"LIVE": "AO VIVO",
"DISABLE_PASSWORD": "Desativar bloqueio por senha",
"DISABLE_PASSWORD_MESSAGE": "Tem certeza que deseja desativar o bloqueio por senha?",
"PASSWORD_LOCK": "Bloqueio de senha",
@ -506,8 +506,8 @@
"DISABLE_FACE_SEARCH_DESCRIPTION": "<p>Ente irá parar de processar geometria facial.</p><p>Você pode reativar o reconhecimento facial novamente, se desejar, então esta operação está segura.</p>",
"ADVANCED": "Avançado",
"FACE_SEARCH_CONFIRMATION": "Eu entendo, e desejo permitir que o ente processe a geometria do rosto",
"LABS": "",
"YOURS": "",
"LABS": "Laboratórios",
"YOURS": "seu",
"PASSPHRASE_STRENGTH_WEAK": "Força da senha: fraca",
"PASSPHRASE_STRENGTH_MODERATE": "Força da senha: moderada",
"PASSPHRASE_STRENGTH_STRONG": "Força da senha: forte",
@ -570,7 +570,7 @@
"FEEDBACK_REQUIRED": "Por favor, ajude-nos com esta informação",
"FEEDBACK_REQUIRED_FOUND_ANOTHER_SERVICE": "O que o outro serviço faz melhor?",
"RECOVER_TWO_FACTOR": "Recuperar dois fatores",
"at": "",
"at": "em",
"AUTH_NEXT": "próximo",
"AUTH_DOWNLOAD_MOBILE_APP": "Baixe nosso aplicativo móvel para gerenciar seus segredos",
"HIDDEN": "Escondido",
@ -604,7 +604,7 @@
"BLUR": "Desfoque",
"INVERT_COLORS": "Inverter Cores",
"ASPECT_RATIO": "Proporção da imagem",
"SQUARE": "",
"SQUARE": "Quadrado",
"ROTATE_LEFT": "Girar para a Esquerda",
"ROTATE_RIGHT": "Girar para a Direita",
"FLIP_VERTICALLY": "Inverter verticalmente",
@ -634,8 +634,8 @@
"VISIT_CAST_ENTE_IO": "Acesse cast.ente.io no dispositivo que você deseja parear.",
"CAST_AUTO_PAIR_FAILED": "Chromecast Auto Pair falhou. Por favor, tente novamente.",
"CACHE_DIRECTORY": "Pasta de Cache",
"PASSKEYS": "",
"FREEHAND": "",
"PASSKEYS": "Chaves de acesso",
"FREEHAND": "Mão livre",
"APPLY_CROP": "Aplicar Recorte",
"PHOTO_EDIT_REQUIRED_TO_SAVE": "Pelo menos uma transformação ou ajuste de cor deve ser feito antes de salvar."
}

View file

@ -178,6 +178,14 @@ function PhotoViewer(props: Iprops) {
switch (event.key) {
case "i":
case "I":
// Enhancement: This should be calling handleOpenInfo, but
// that handling the case where a keybinding triggers an
// exit from fullscreen and opening the info drawer is not
// currently working (the info drawer opens, but the exit
// from fullscreen doesn't happen).
//
// So for now, let the keybinding only work when not in
// fullscreen instead of doing a mish-mash.
setShowInfo(true);
break;
case "Backspace":
@ -616,7 +624,18 @@ function PhotoViewer(props: Iprops) {
const handleCloseInfo = () => {
setShowInfo(false);
};
const handleOpenInfo = () => {
const handleOpenInfo = (photoSwipe: any) => {
// Get out of full screen mode if needed first to be able to show info
if (isInFullScreenMode) {
const fullScreenApi: PhotoswipeFullscreenAPI =
photoSwipe?.ui?.getFullscreenAPI();
if (fullScreenApi && fullScreenApi.isFullscreen()) {
fullScreenApi.exit();
setIsInFullScreenMode(false);
}
}
setShowInfo(true);
};
@ -851,7 +870,7 @@ function PhotoViewer(props: Iprops) {
<button
className="pswp__button pswp__button--custom"
title={t("INFO_OPTION")}
onClick={handleOpenInfo}
onClick={() => handleOpenInfo(photoSwipe)}
>
<InfoIcon />
</button>