mirror of https://github.com/SoPat712/BeReal-Export-Manager.git
synced 2026-02-10 07:58:38 -05:00

Compare commits: 2946785f55 ... b9fadc9c31 (3 commits: b9fadc9c31, e9dc1a3182, 83398198c3)
.gitignore (vendored): 190 lines changed
@@ -1,24 +1,26 @@
-# Byte-compiled / optimized / DLL files
+# Input and output directories
+input/
+output/
+
+# BeReal export files (should be in input folder anyway)
+memories.json
+posts.json
+realmojis.json
+
+# macOS
+.DS_Store
+.DS_Store?
+._*
+.Spotlight-V100
+.Trashes
+ehthumbs.db
+Thumbs.db
+
+# Python
 __pycache__/
 *.py[cod]
 *$py.class
 
-# C extensions
 *.so
 
-#My stuff
-betterSoFar.py
-greatSoFar.py
-
-# json files
-*.json
-
-# Input/Output Files
-Photos/
-out/
-correctout/
-
-# Distribution / packaging
 .Python
 build/
 develop-eggs/
@@ -32,158 +34,26 @@ parts/
 sdist/
 var/
 wheels/
-share/python-wheels/
 *.egg-info/
 .installed.cfg
 *.egg
 MANIFEST
 
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-*.py,cover
-.hypothesis/
-.pytest_cache/
-cover/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-db.sqlite3
-db.sqlite3-journal
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-.pybuilder/
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# IPython
-profile_default/
-ipython_config.py
-
-# pyenv
-# For a library or package, you might want to ignore these files since the code is
-# intended to run in multiple environments; otherwise, check them in:
-# .python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# poetry
-# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
-# This is especially recommended for binary packages to ensure reproducibility, and is more
-# commonly ignored for libraries.
-# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
-#poetry.lock
-
-# pdm
-# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
-#pdm.lock
-# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
-# in version control.
-# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
-.pdm.toml
-.pdm-python
-.pdm-build/
-
-# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
-__pypackages__/
-
-# Celery stuff
-celerybeat-schedule
-celerybeat.pid
-
-# SageMath parsed files
-*.sage.py
-
-# Environments
-.env
-.venv
-env/
+# Virtual environments
 venv/
+env/
 ENV/
 env.bak/
 venv.bak/
 
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-# pytype static type analyzer
-.pytype/
-
-# Cython debug symbols
-cython_debug/
-
-# PyCharm
-# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
-# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
-# and can be added to the global gitignore or merged into this file. For a more nuclear
-# option (not recommended) you can uncomment the following to ignore the entire idea folder.
-#.idea/
-
-
-# VS-Code
+# IDE
 .vscode/
-*.code-workspace
-
-# Local History for Visual Studio Code
-.history/
-
-# Built Visual Studio Code Extensions
-*.vsix
-
-
-_tests/
-tempCodeRunnerFile.py
+.idea/
+*.swp
+*.swo
+*~
+
+# Temporary files
+*.tmp
+*.temp
+*.log
LICENSE: 2 lines changed
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2024 Lukullul
+Copyright (c) 2024 SoPat712
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
README.md: 114 lines changed
@@ -1,21 +1,22 @@
 # BeReal Exporter
 
-This python script doesn't export photos and realmojis from the social media platform BeReal directly for that, you have to make a request to the BeReal see [this Reddit post](https://www.reddit.com/r/bereal_app/comments/19dl0yk/experiencetutorial_for_exporting_all_bereal/?utm_source=share&utm_medium=web3x&utm_name=web3xcss&utm_term=1&utm_content=share_button) for more information.
+This python script doesn't export photos and realmojis from the social media platform BeReal directly - for that, you have to make a request to BeReal. See [this Reddit post](https://www.reddit.com/r/bereal_app/comments/19dl0yk/experiencetutorial_for_exporting_all_bereal/?utm_source=share&utm_medium=web3x&utm_name=web3xcss&utm_term=1&utm_content=share_button) for more information.
 
-It simple processes the data from the BeReal export and exports the images(as well BTS-videos) with added metadata, such as the original date and location.
+It processes the data from the BeReal export and exports the images with added metadata, such as the original date and location. Now supports posts, memories, realmojis, and conversation images with parallel processing for speed. Also has interactive modes for when you want to manually choose which camera is which for conversation images.
 
 I'm gonna be upfront and say it's BeReal's fault the dates are wonky on the output files, idk why they chose to save the time like this:
 
     "takenTime": "2024-12-24T01:27:16.726Z",
     "berealMoment": "2024-12-23T22:39:05.327Z",
 
-instead of the way everyone else always does it with UNIX Epoch time, but it makes it pretty hard to find out what time the picture was taken, and to properly tag the photos with the correct time. Scroll down to arguments and see default-timezone for a little more info.
+instead of the way everyone else always does it with UNIX Epoch time, but it makes it pretty hard to find out what time the picture was taken, and to properly tag the photos with the correct time. The script now handles timezone conversion automatically using GPS coordinates when available, falling back to America/New_York timezone.
 
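For illustration, a minimal sketch of the conversion described above, using the same `pytz` and `timezonefinder` libraries the script imports (the coordinates are made up):

```python
# Sketch of the timezone handling the README describes (not the script's exact code).
from datetime import datetime

import pytz
from timezonefinder import TimezoneFinder

taken = datetime.strptime("2024-12-24T01:27:16.726Z", "%Y-%m-%dT%H:%M:%S.%fZ")
taken = pytz.UTC.localize(taken)  # BeReal stores times in UTC

tz_name = TimezoneFinder().timezone_at(lat=40.7128, lng=-74.0060)  # hypothetical GPS fix
local = taken.astimezone(pytz.timezone(tz_name or "America/New_York"))
print(local.strftime("%Y:%m:%d %H:%M:%S"))  # EXIF-style local timestamp
```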
 ## Installation
 
 1. Clone the repository:
 ```sh
-git clone https://github.com/Lukullul/bereal-exporter.git
-cd bereal-exporter
+git clone git@github.com:SoPat712/BeReal-Export-Manager.git
+cd BeReal-Export-Manager
 ```
 
 2. Install the required Python packages:
@@ -25,61 +26,132 @@ instead of the way everyone else always does it with UNIX Epoch time, but it mak
 
 3. Ensure you have `exiftool` installed on your system and set it up as a `PATH` variable. You can download it [here](https://exiftool.org/).
 
+4. Put your BeReal export folder in the `input` directory. The script will automatically find it.
+
 ## Usage
 
-To export your images run the script within the BeReal export folder:
+Put your BeReal export in the `input` folder and run:
 ```sh
 python bereal_exporter.py [OPTIONS]
 ```
 
+The script automatically finds your export folder and processes everything in parallel for speed.
+
 ## Options
 
 - `-v, --verbose`: Explain what is being done.
 - `-t, --timespan`: Exports the given timespan.
   - Valid format: `DD.MM.YYYY-DD.MM.YYYY`.
   - Wildcards can be used: `DD.MM.YYYY-*`.
-- `--exiftool-path`: Set the path to the ExifTool executable (needed if it isn't on the $PATH)
 - `-y, --year`: Exports the given year.
-- `-p, --out-path`: Set a custom output path (default is `./out`).
-- `--bereal-path`: Set a custom BeReal path (default `./`)
+- `-p, --out-path`: Set a custom output path (default is `./output`).
+- `--input-path`: Set the input folder path containing BeReal export (default `./input`).
+- `--exiftool-path`: Set the path to the ExifTool executable (needed if it isn't on the $PATH).
+- `--max-workers`: Maximum number of parallel workers (default 4).
 - `--no-memories`: Don't export the memories.
 - `--no-realmojis`: Don't export the realmojis.
-- `--no-composites`: Don't create composites with the front image overlayed on the back.
-- `--default-timezone "America/New_York"`: Set fallback timezone, since memories.json has UTC times.
-  This doesn't work the greatest but I do recommend running it with whatever timezone you're in. It goes Lat/Long time finding -> Default Timezone -> UTC or whatever BeReal is providing.
+- `--no-posts`: Don't export the posts.
+- `--no-conversations`: Don't export the conversations.
+- `--conversations-only`: Export only conversations (for debugging).
+- `--interactive-conversations`: Manually choose front/back camera for conversation images.
+- `--web-ui`: Use web UI for interactive conversation selection (requires `--interactive-conversations`).
 
+The script automatically handles timezone conversion using GPS coordinates when available, falling back to America/New_York. It creates composite images with the back camera as the main image and front camera overlaid in the corner with rounded edges and a black border, just like BeReal shows them.
 
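For a sense of how a composite like that can be built with PIL, here's a rough sketch (not the script's exact routine; the sizes, corner radius, and border width are made-up values):

```python
# Rough sketch of a BeReal-style composite: back camera as the canvas,
# front camera pasted into the corner with rounded corners and a black border.
from PIL import Image, ImageDraw, ImageOps

back = Image.open("main-view.webp").convert("RGB")
front = Image.open("selfie-view.webp").convert("RGB")

overlay = front.resize((back.width // 3, back.height // 3))
overlay = ImageOps.expand(overlay, border=8, fill="black")  # black frame

# Rounded-corner alpha mask for the paste
mask = Image.new("L", overlay.size, 0)
ImageDraw.Draw(mask).rounded_rectangle(
    [0, 0, overlay.width - 1, overlay.height - 1], radius=40, fill=255
)
back.paste(overlay, (24, 24), mask)  # top-left corner, like the app
back.save("composited.webp")
```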
 ## Examples
 
-1. Export data for the year 2022:
+1. Export everything (default behavior):
+```sh
+python bereal_exporter.py
+```
+
+2. Export data for the year 2022:
 ```sh
 python bereal_exporter.py --year 2022
 ```
 
-2. Export data for a specific timespan:
+3. Export data for a specific timespan:
 ```sh
 python bereal_exporter.py --timespan '04.01.2022-31.12.2022'
 ```
 
-3. Export data to a custom output path:
+4. Export to a custom output path:
 ```sh
-python bereal_exporter.py --path /path/to/output
+python bereal_exporter.py --out-path /path/to/output
 ```
 
-4. Specify the BeReal export folder:
+5. Use a different input folder:
 ```sh
-python bereal_exporter.py --bereal-path /path/to/export
+python bereal_exporter.py --input-path /path/to/bereal/export
 ```
 
-4. Use portable installed exiftool application:
+6. Use portable exiftool:
 ```sh
 python bereal_exporter.py --exiftool-path /path/to/exiftool.exe
 ```
 
-5. Export memories only:
+7. Export only memories and posts (skip realmojis and conversations):
 ```sh
-python bereal_exporter.py --no-realmojis
+python bereal_exporter.py --no-realmojis --no-conversations
 ```
 
+8. Debug conversations only:
+```sh
+python bereal_exporter.py --conversations-only
+```
+
+9. Use more workers for faster processing:
+```sh
+python bereal_exporter.py --max-workers 8
+```
+
+10. Interactive conversation selection (command line):
+```sh
+python bereal_exporter.py --conversations-only --interactive-conversations
+```
+
+11. Interactive conversation selection (web UI):
+```sh
+python bereal_exporter.py --conversations-only --interactive-conversations --web-ui
+```
+
+## Interactive Conversation Processing
+
+For conversation images, the script tries to automatically detect which image should be the main view vs selfie view, but sometimes it gets it wrong. That's where the interactive modes come in handy.
+
+**Automatic Detection**: The script looks at filenames, image dimensions, and patterns to guess which camera is which. Works most of the time but not always.
+
+**Interactive Mode**: You can manually choose which image should be the selfie view (front camera overlay):
+- **Command Line** (`--interactive-conversations`): Opens images in your system viewer, you choose via keyboard
+- **Web UI** (`--interactive-conversations --web-ui`): Opens a web page where you just click on the selfie image
+
+The web UI is pretty nice - shows both images side by side, you click the one that should be the selfie view, and it automatically continues processing. Much easier than the command line version.
+
+**File Naming**: All images get descriptive names so you know what's what:
+- `2022-09-10_16-35-30_main-view.webp` (back camera)
+- `2022-09-10_16-35-30_selfie-view.webp` (front camera)
+- `2022-09-10_16-35-30_composited.webp` (combined image with selfie overlaid)
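The dimension check in that automatic guess boils down to an aspect-ratio comparison; here is a simplified sketch of the heuristic that appears in `detect_primary_overlay_conversation` further down this diff:

```python
# Simplified sketch: when two conversation images differ a lot in aspect
# ratio, treat the more square one as the front camera (selfie overlay).
from PIL import Image

def guess_selfie(path_a: str, path_b: str) -> str:
    with Image.open(path_a) as a, Image.open(path_b) as b:
        ratio_a = a.width / a.height
        ratio_b = b.width / b.height
    if abs(ratio_a - ratio_b) > 0.2:
        # The ratio closer to 1.0 (square) is likelier the front camera
        return path_a if abs(ratio_a - 1.0) < abs(ratio_b - 1.0) else path_b
    return path_b  # fall back to an arbitrary but stable choice
```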
+## What Gets Exported
+
+The script exports different types of content to organized folders:
+
+- **Posts**: Your daily BeReal posts (main-view/selfie-view images + composited versions)
+- **Memories**: Same as posts but with richer metadata (location, multiple timestamps)
+- **Realmojis**: Your reaction images
+- **Conversations**: Images from private conversations
+
+All images get proper EXIF metadata with:
+- Original timestamps (converted to local timezone using GPS when available)
+- GPS coordinates (when available)
+- Composited images with front camera overlaid on back camera (BeReal style with rounded corners and black border)
+
+The script automatically detects duplicate content between posts and memories to avoid saving the same image twice.
+
+## Performance
+
+Uses parallel processing with configurable worker threads (default 4) for faster exports. Progress bars show real-time status. On a decent machine, expect to process hundreds of images per minute. If you have a fast SSD and good CPU, try bumping up `--max-workers` to 8 or more.
+
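The worker pool behind this is the standard `concurrent.futures` pattern the script imports; a condensed sketch (the real worker functions are `process_post`/`process_memory` in `bereal_exporter.py`):

```python
# Condensed sketch of the worker-pool pattern used for exports.
from concurrent.futures import ThreadPoolExecutor, as_completed

from tqdm import tqdm

def export_all(posts, process_post, out_path, max_workers=4):
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        futures = [pool.submit(process_post, p, out_path) for p in posts]
        for future in tqdm(as_completed(futures), total=len(futures)):
            future.result()  # re-raise worker exceptions; tqdm draws the bar
```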
 ## License
 
 This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for more details.
bereal_exporter.py: 1925 lines changed
@@ -1,17 +1,19 @@
 import argparse
-import curses
 import json
 import os
-import sys
+import glob
 from datetime import datetime as dt
-from datetime import timezone
 from shutil import copy2 as cp
-from typing import Optional
 
-import pytz
-from exiftool import ExifToolHelper as et
 from PIL import Image, ImageDraw
+import pytz
 from timezonefinder import TimezoneFinder
+from concurrent.futures import ThreadPoolExecutor, as_completed
+from functools import partial
+from tqdm import tqdm
+from tqdm.contrib.logging import logging_redirect_tqdm
+import logging
+
+from exiftool import ExifToolHelper as et
 
 
 def init_parser() -> argparse.Namespace:
@@ -20,566 +22,1565 @@ def init_parser() -> argparse.Namespace:
     """
     parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
     parser.add_argument(
-        "-v", "--verbose", action="store_true", help="Explain what is being done."
+        "-v",
+        "--verbose",
+        default=False,
+        action="store_true",
+        help="Explain what is being done",
     )
     parser.add_argument(
-        "--exiftool-path", dest="exiftool_path", help="Path to ExifTool executable."
+        "--exiftool-path",
+        dest="exiftool_path",
+        type=str,
+        help="Set the path to the ExifTool executable (needed if it isn't on the $PATH)",
     )
     parser.add_argument(
         "-t",
         "--timespan",
         type=str,
-        help="DD.MM.YYYY-DD.MM.YYYY or wildcards with '*'.",
+        help="Exports the given timespan\n"
+        "Valid format: 'DD.MM.YYYY-DD.MM.YYYY'\n"
+        "Wildcards can be used: 'DD.MM.YYYY-*'",
     )
-    parser.add_argument("-y", "--year", type=int, help="Exports the given year.")
+    parser.add_argument("-y", "--year", type=int, help="Exports the given year")
     parser.add_argument(
         "-p",
         "--out-path",
         dest="out_path",
-        default="./out",
-        help="Export output path (default ./out).",
+        type=str,
+        default="./output",
+        help="Set a custom output path (default ./output)",
     )
     parser.add_argument(
-        "--bereal-path",
-        dest="bereal_path",
-        default=".",
-        help="Path to BeReal data (default ./).",
+        "--input-path",
+        dest="input_path",
+        type=str,
+        default="./input",
+        help="Set the input folder path containing BeReal export (default ./input)",
+    )
+    parser.add_argument(
+        "--max-workers",
+        dest="max_workers",
+        type=int,
+        default=4,
+        help="Maximum number of parallel workers (default 4)",
     )
     parser.add_argument(
         "--no-memories",
         dest="memories",
         default=True,
        action="store_false",
-        help="Don't export memories.",
+        help="Don't export the memories",
     )
     parser.add_argument(
         "--no-realmojis",
         dest="realmojis",
         default=True,
         action="store_false",
-        help="Don't export realmojis.",
+        help="Don't export the realmojis",
     )
     parser.add_argument(
-        "--no-composites",
-        dest="composites",
+        "--no-posts",
+        dest="posts",
         default=True,
         action="store_false",
-        help="Don't create a composite image front-on-back for each memory.",
+        help="Don't export the posts",
     )
     parser.add_argument(
-        "--default-timezone",
-        dest="default_tz",
-        type=str,
-        default=None,
-        help="If no lat/lon or time zone lookup fails, fall back to this time zone (e.g. 'America/New_York').",
+        "--no-conversations",
+        dest="conversations",
+        default=True,
+        action="store_false",
+        help="Don't export the conversations",
+    )
+    parser.add_argument(
+        "--conversations-only",
+        dest="conversations_only",
+        default=False,
+        action="store_true",
+        help="Export only conversations (for debugging)",
+    )
+    parser.add_argument(
+        "--interactive-conversations",
+        dest="interactive_conversations",
+        default=False,
+        action="store_true",
+        help="Manually choose front/back camera for conversation images",
+    )
+    parser.add_argument(
+        "--web-ui",
+        dest="web_ui",
+        default=False,
+        action="store_true",
+        help="Use web UI for interactive conversation selection",
     )
-    return parser.parse_args()
+
+    args = parser.parse_args()
+    if args.year and args.timespan:
+        print("Timespan argument will be prioritized")
+
+    # Handle conversations-only flag
+    if args.conversations_only:
+        args.memories = False
+        args.posts = False
+        args.realmojis = False
+        args.conversations = True
+        print("Running in conversations-only mode for debugging")
+
+    return args
 
 
-class CursesLogger:
-    """
-    When verbose is True and curses is available, we keep a multi-line log above
-    and a pinned progress bar at the bottom. This might restart if the window is resized too small.
-    """
-
-    def __init__(self, stdscr):
-        self.stdscr = stdscr
-        curses.curs_set(0)  # hide cursor
-
-        self.max_y, self.max_x = self.stdscr.getmaxyx()
-        self.log_height = self.max_y - 2  # keep bottom line(s) for the progress bar
-
-        # create log window
-        self.logwin = curses.newwin(self.log_height, self.max_x, 0, 0)
-        self.logwin.scrollok(True)
-
-        # create progress bar window
-        self.pbwin = curses.newwin(1, self.max_x, self.log_height, 0)
-
-        self.log_count = 0
-
-    def print_log(self, text: str, force: bool = False):
-        # Force doesn't matter in curses; we always show
-        self.logwin.addstr(self.log_count, 0, text)
-        self.logwin.clrtoeol()
-        self.log_count += 1
-        if self.log_count >= self.log_height:
-            self.logwin.scroll(1)
-            self.log_count -= 1
-        self.logwin.refresh()
-
-    def show_progress(self, iteration: int, total: int, prefix="", date_str=""):
-        if total == 0:
-            percent = 100
-        else:
-            percent = int(100 * iteration / total)
-
-        bar_length = self.max_x - 30
-        if bar_length < 10:
-            bar_length = 10
-
-        filled_len = bar_length * iteration // max(1, total)
-        bar = "█" * filled_len + "-" * (bar_length - filled_len)
-
-        line_str = f"{prefix} |{bar}| {percent}% - {date_str}"
-        self.pbwin.clear()
-        # Clip if the line is longer than the terminal
-        self.pbwin.addstr(0, 0, line_str[: self.max_x - 1])
-        self.pbwin.refresh()
-
-
-class BasicLogger:
-    """
-    A fallback / minimal logger if curses fails or if verbose isn't set.
-    """
-
-    def __init__(self, verbose: bool):
-        self.verbose = verbose
-
-    def print_log(self, text: str, force: bool = False):
-        if self.verbose or force:
-            print(text)
-
-    def show_progress(self, iteration: int, total: int, prefix="", date_str=""):
-        # Overwrites one line with a simple bar
-        if total == 0:
-            percent = 100
-        else:
-            percent = int(100 * iteration / total)
-
-        bar_length = 40
-        filled_len = bar_length * iteration // max(1, total)
-        bar = "=" * filled_len + "-" * (bar_length - filled_len)
-
-        line_str = f"{prefix} |{bar}| {percent}% - {date_str}"
-        sys.stdout.write("\r" + line_str)
-        sys.stdout.flush()
-
-        if iteration == total:
-            print()  # newline after finishing
 
 
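Note the inverted flag pattern above: each `--no-*` switch uses argparse's `store_false` with a positively named destination, and `--conversations-only` then overrides those values; a quick illustration (hypothetical invocation):

```python
# Hypothetical run: python bereal_exporter.py --no-realmojis --conversations-only
args = init_parser()
print(args.realmojis)      # False: --no-realmojis stores False into "realmojis"
print(args.posts)          # False: forced off by --conversations-only
print(args.conversations)  # True: forced on by --conversations-only
```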
 class BeRealExporter:
-    """
-    Main exporter logic, with curses or fallback for logs.
-    Using timezone_at only (not closest_timezone_at).
-    """
-
-    def __init__(self, args: argparse.Namespace, logger):
-        self.args = args
-        self.logger = logger
-        self.verbose = args.verbose
+    def __init__(self, args: argparse.Namespace):
+        self.time_span = self.init_time_span(args)
         self.exiftool_path = args.exiftool_path
         self.out_path = args.out_path.rstrip("/")
-        self.bereal_path = args.bereal_path.rstrip("/")
-        self.create_composites = args.composites
-        self.default_tz = args.default_tz
+        self.input_path = args.input_path.rstrip("/")
+        self.verbose = args.verbose
+        self.max_workers = args.max_workers
+        self.interactive_conversations = args.interactive_conversations
+        self.web_ui = args.web_ui
 
-        # parse timespan/year
-        self.time_span = self.init_time_span(args)
+        # Setup logging for clean progress bars
+        if self.verbose:
+            logging.basicConfig(level=logging.INFO, format='%(message)s')
+            self.logger = logging.getLogger(__name__)
+        else:
+            self.logger = None
 
-        # For lat/lon lookups
-        self.tf = TimezoneFinder()
+        # Find the BeReal export folder inside input
+        self.bereal_path = self.find_bereal_export_folder()
 
-    def init_time_span(self, args: argparse.Namespace) -> tuple:
+    @staticmethod
+    def init_time_span(args: argparse.Namespace) -> tuple:
+        """
+        Initializes time span based on the arguments.
+        """
         if args.timespan:
             try:
                 start_str, end_str = args.timespan.strip().split("-")
-                if start_str == "*":
-                    start = dt(1970, 1, 1, tzinfo=timezone.utc)
-                else:
-                    naive_start = dt.strptime(start_str, "%d.%m.%Y")
-                    start = naive_start.replace(tzinfo=timezone.utc)
-                if end_str == "*":
-                    end = dt.now(tz=timezone.utc)
-                else:
-                    naive_end = dt.strptime(end_str, "%d.%m.%Y")
-                    naive_end = naive_end.replace(hour=23, minute=59, second=59)
-                    end = naive_end.replace(tzinfo=timezone.utc)
+                start = (
+                    dt.fromtimestamp(0)
+                    if start_str == "*"
+                    else dt.strptime(start_str, "%d.%m.%Y")
+                )
+                end = dt.now() if end_str == "*" else dt.strptime(end_str, "%d.%m.%Y")
                 return start, end
             except ValueError:
                 raise ValueError(
-                    "Invalid timespan format. Use 'DD.MM.YYYY-DD.MM.YYYY' or '*' wildcard."
+                    "Invalid timespan format. Use 'DD.MM.YYYY-DD.MM.YYYY'."
                 )
         elif args.year:
-            naive_start = dt(args.year, 1, 1)
-            naive_end = dt(args.year, 12, 31, 23, 59, 59)
-            return (
-                naive_start.replace(tzinfo=timezone.utc),
-                naive_end.replace(tzinfo=timezone.utc),
-            )
+            return dt(args.year, 1, 1), dt(args.year, 12, 31)
         else:
-            return (
-                dt(1970, 1, 1, tzinfo=timezone.utc),
-                dt.now(tz=timezone.utc),
-            )
+            return dt.fromtimestamp(0), dt.now()
+
+    def find_bereal_export_folder(self) -> str:
+        """
+        Finds the BeReal export folder inside the input directory.
+        """
+        if not os.path.exists(self.input_path):
+            raise FileNotFoundError(f"Input path not found: {self.input_path}")
+
+        # Look for folders that contain the expected structure
+        for item in os.listdir(self.input_path):
+            item_path = os.path.join(self.input_path, item)
+            if os.path.isdir(item_path):
+                # Check if this folder contains the expected JSON files
+                if (os.path.exists(os.path.join(item_path, "memories.json")) or
+                        os.path.exists(os.path.join(item_path, "posts.json"))):
+                    return item_path
+
+        raise FileNotFoundError("No BeReal export folder found in input directory")
 
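`find_bereal_export_folder` only looks one level deep and keys on the JSON files, so a layout like the following (hypothetical folder name) is enough:

```python
# Hypothetical input layout accepted by find_bereal_export_folder: any
# first-level folder under ./input containing memories.json or posts.json.
import os

os.makedirs("input/bereal-export-2024/Photos/post", exist_ok=True)  # name is arbitrary
open("input/bereal-export-2024/posts.json", "w").close()            # marker file

# BeRealExporter(init_parser()) would now resolve bereal_path to
# "input/bereal-export-2024".
```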
     def verbose_msg(self, msg: str):
-        if self.verbose:
-            self.logger.print_log(msg)
-
-    def log(self, text: str, force: bool = False):
-        self.logger.print_log(text, force=force)
-
-    def show_progress(self, i: int, total: int, prefix="", date_str=""):
-        self.logger.show_progress(i, total, prefix, date_str)
-
-    def resolve_img_path(self, path_str: str) -> Optional[str]:
-        if "/post/" in path_str:
-            candidate = os.path.join(
-                self.bereal_path, "Photos/post", os.path.basename(path_str)
-            )
-            if os.path.isfile(candidate):
-                return candidate
-        elif "/bereal/" in path_str:
-            candidate = os.path.join(
-                self.bereal_path, "Photos/bereal", os.path.basename(path_str)
-            )
-            if os.path.isfile(candidate):
-                return candidate
-
-        # fallback
-        p1 = os.path.join(self.bereal_path, "Photos/post", os.path.basename(path_str))
-        p2 = os.path.join(self.bereal_path, "Photos/bereal", os.path.basename(path_str))
-        if os.path.isfile(p1):
-            return p1
-        if os.path.isfile(p2):
-            return p2
-        return None
-
-    def localize_datetime(self, dt_utc: dt, lat: float, lon: float) -> dt:
         """
-        Use tf.timezone_at(...) only. If lat/lon missing or fails, fallback to default tz or stay UTC.
+        Prints an explanation of what is being done to the terminal.
+        Uses logging to work nicely with progress bars.
         """
-        if lat is None or lon is None:
-            # fallback
-            if self.default_tz:
-                try:
-                    fallback_zone = pytz.timezone(self.default_tz)
-                    return dt_utc.astimezone(fallback_zone)
-                except Exception as e:
-                    self.verbose_msg(
-                        f"Warning: fallback time zone '{self.default_tz}' invalid: {e}"
-                    )
-            return dt_utc
-
+        if self.verbose and self.logger:
+            self.logger.info(msg)
+
+    def convert_to_local_time(self, utc_dt: dt, location=None) -> dt:
+        """
+        Converts UTC datetime to local timezone based on location or defaults to America/New_York.
+        """
+        # Ensure the datetime is timezone-aware (UTC)
+        if utc_dt.tzinfo is None:
+            utc_dt = pytz.UTC.localize(utc_dt)
+        elif utc_dt.tzinfo != pytz.UTC:
+            utc_dt = utc_dt.astimezone(pytz.UTC)
+
+        # Default timezone
+        local_tz = pytz.timezone('America/New_York')
+
+        # Try to get timezone from location if available
+        if location and "latitude" in location and "longitude" in location:
+            try:
+                tf = TimezoneFinder()
+                timezone_str = tf.timezone_at(
+                    lat=location["latitude"],
+                    lng=location["longitude"]
+                )
+                if timezone_str:
+                    local_tz = pytz.timezone(timezone_str)
+                    self.verbose_msg(f"Using timezone {timezone_str} from GPS location")
+                else:
+                    self.verbose_msg("GPS location found but timezone lookup failed, using America/New_York")
+            except Exception as e:
+                self.verbose_msg(f"Error determining timezone from GPS: {e}, using America/New_York")
+        else:
+            self.verbose_msg("No GPS location, using America/New_York timezone")
+
+        # Convert to local time and return naive datetime for EXIF
+        local_dt = utc_dt.astimezone(local_tz)
+        return local_dt.replace(tzinfo=None)
+
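A quick usage sketch of the new `convert_to_local_time` (hypothetical coordinates and exporter instance; the return value is naive local time, ready for EXIF):

```python
# Hypothetical call, assuming an exporter instance and a memory's location dict.
from datetime import datetime

utc_taken = datetime(2024, 12, 24, 1, 27, 16)             # naive UTC from the JSON
location = {"latitude": 40.7128, "longitude": -74.0060}   # hypothetical GPS fix
local = exporter.convert_to_local_time(utc_taken, location)
print(local)  # 2024-12-23 20:27:16, naive local time for EXIF:DateTimeOriginal
```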
+    def process_memory(self, memory, out_path_memories):
+        """
+        Processes a single memory (for parallel execution).
+        Saves to posts folder and skips if files already exist to avoid duplicates.
+        """
+        memory_dt = self.get_datetime_from_str(memory["takenTime"])
+        if not (self.time_span[0] <= memory_dt <= self.time_span[1]):
+            return None
+
+        # Get front and back image paths
+        front_path = os.path.join(self.bereal_path, memory["frontImage"]["path"])
+        back_path = os.path.join(self.bereal_path, memory["backImage"]["path"])
+
+        # Convert to local time for filename (to match EXIF metadata)
+        img_location = memory.get("location", None)
+        local_dt = self.convert_to_local_time(memory_dt, img_location)
+
+        # Create output filenames with descriptive names
+        base_filename = f"{local_dt.strftime('%Y-%m-%d_%H-%M-%S')}"
+        secondary_output = f"{out_path_memories}/{base_filename}_selfie-view.webp"  # front camera
+        primary_output = f"{out_path_memories}/{base_filename}_main-view.webp"  # back camera
+        composite_output = f"{out_path_memories}/{base_filename}_composited.webp"
+
+        # Skip if files already exist (avoid duplicates from posts)
+        if os.path.exists(primary_output) and os.path.exists(secondary_output) and os.path.exists(composite_output):
+            self.verbose_msg(f"Skipping {base_filename} - already exists from posts export")
+            return f"{base_filename} (skipped - duplicate)"
+
+        # Export individual images (front=secondary, back=primary)
+        if not os.path.exists(secondary_output):
+            self.export_img(front_path, secondary_output, memory_dt, img_location)
+        if not os.path.exists(primary_output):
+            self.export_img(back_path, primary_output, memory_dt, img_location)
+
+        # Create composite image (back/primary as background, front/secondary as overlay - BeReal style)
+        if not os.path.exists(composite_output) and os.path.exists(secondary_output) and os.path.exists(primary_output):
+            self.create_composite_image(primary_output, secondary_output, composite_output, memory_dt, img_location)
+
+        return base_filename
+
+    def process_post(self, post, out_path_posts):
+        """
+        Processes a single post (for parallel execution).
+        """
+        post_dt = self.get_datetime_from_str(post["takenAt"])
+        if not (self.time_span[0] <= post_dt <= self.time_span[1]):
+            return None
+
+        # Get primary and secondary image paths
+        primary_path = os.path.join(self.bereal_path, post["primary"]["path"])
+        secondary_path = os.path.join(self.bereal_path, post["secondary"]["path"])
+
+        # Convert to local time for filename (to match EXIF metadata)
+        post_location = post.get("location", None)
+        local_dt = self.convert_to_local_time(post_dt, post_location)
+
+        # Create output filename
+        base_filename = f"{local_dt.strftime('%Y-%m-%d_%H-%M-%S')}"
+
+        # Export individual images
+        primary_output = f"{out_path_posts}/{base_filename}_main-view.webp"
+        secondary_output = f"{out_path_posts}/{base_filename}_selfie-view.webp"
+        composite_output = f"{out_path_posts}/{base_filename}_composited.webp"
+
+        # Export primary image
+        self.export_img(primary_path, primary_output, post_dt, post_location)
+
+        # Export secondary image
+        self.export_img(secondary_path, secondary_output, post_dt, post_location)
+
+        # Create composite image
+        if os.path.exists(primary_output) and os.path.exists(secondary_output):
+            self.create_composite_image(primary_output, secondary_output, composite_output, post_dt, post_location)
+
+        return base_filename
+
+    def interactive_choose_primary_overlay(self, original_files, exported_files, conversation_id, file_id, progress_info=None):
+        """
+        Interactive mode to let user choose which image is main view vs selfie view.
+        Opens images in system viewer for preview.
+        """
+        if len(exported_files) != 2:
+            return exported_files[0], exported_files[1] if len(exported_files) > 1 else exported_files[0]
+
+        print(f"\n--- Conversation {conversation_id}, Message ID {file_id} ---")
+        if progress_info:
+            print(f"Progress: {progress_info}")
+
+        # Show image info
         try:
-            tz_name = self.tf.timezone_at(lng=lon, lat=lat)
-            if tz_name:
-                local_zone = pytz.timezone(tz_name)
-                return dt_utc.astimezone(local_zone)
-            else:
-                # fallback
-                if self.default_tz:
-                    try:
-                        fallback_zone = pytz.timezone(self.default_tz)
-                        return dt_utc.astimezone(fallback_zone)
-                    except Exception as e:
-                        self.verbose_msg(
-                            f"Warning: fallback time zone '{self.default_tz}' invalid: {e}"
-                        )
-                return dt_utc
+            from PIL import Image
+            img1 = Image.open(exported_files[0])
+            img2 = Image.open(exported_files[1])
+            print(f"Image 1: {os.path.basename(exported_files[0])} ({img1.width}x{img1.height}, {img1.width/img1.height:.2f} ratio)")
+            print(f"Image 2: {os.path.basename(exported_files[1])} ({img2.width}x{img2.height}, {img2.width/img2.height:.2f} ratio)")
+            img1.close()
+            img2.close()
+        except Exception:
+            print(f"Image 1: {os.path.basename(exported_files[0])}")
+            print(f"Image 2: {os.path.basename(exported_files[1])}")
+
+        # Open images in system viewer
+        print("\nOpening images in system viewer...")
+        try:
+            import subprocess
+            import platform
+
+            system = platform.system()
+            for i, img_path in enumerate(exported_files, 1):
+                print(f"Opening Image {i}...")
+                if system == "Darwin":  # macOS
+                    subprocess.run(["open", img_path], check=False)
+                elif system == "Windows":
+                    subprocess.run(["start", img_path], shell=True, check=False)
+                else:  # Linux
+                    subprocess.run(["xdg-open", img_path], check=False)
+
+                # Small delay between opening images
+                import time
+                time.sleep(0.5)
+
         except Exception as e:
-            self.verbose_msg(
-                f"Warning: Time zone lookup failed for lat={lat}, lon={lon}: {e}"
-            )
-            if self.default_tz:
-                try:
-                    fallback_zone = pytz.timezone(self.default_tz)
-                    return dt_utc.astimezone(fallback_zone)
-                except Exception as e2:
-                    self.verbose_msg(
-                        f"Warning: fallback time zone '{self.default_tz}' invalid: {e2}"
-                    )
-            return dt_utc
-
+            print(f"Could not open images automatically: {e}")
+            print("Please manually open the images to view them.")
+
+        print("\nWhich image should be the SELFIE VIEW (front camera/overlay)?")
+        print("1. Image 1")
+        print("2. Image 2")
+        print("3. Skip composite creation")
+
+        while True:
+            try:
+                choice = input("Enter choice (1, 2, or 3): ").strip()
+                if choice == "1":
+                    return exported_files[1], exported_files[0]  # img2 main, img1 selfie
+                elif choice == "2":
+                    return exported_files[0], exported_files[1]  # img1 main, img2 selfie
+                elif choice == "3":
+                    return None, None  # Skip composite
+                else:
+                    print("Please enter 1, 2, or 3")
+            except (KeyboardInterrupt, EOFError):
+                print("\nSkipping composite creation...")
+                return None, None
+
-    def embed_exif(
-        self, file_name: str, dt_utc: dt, lat: float = None, lon: float = None
|
def web_ui_choose_primary_overlay(self, exported_files, conversation_id, file_id, progress_info=None):
|
||||||
|
"""
|
||||||
|
Web UI mode to let user choose which image is selfie view.
|
||||||
|
Creates a simple HTML page with side-by-side images.
|
||||||
|
"""
|
||||||
|
if len(exported_files) != 2:
|
||||||
|
return exported_files[0], exported_files[1] if len(exported_files) > 1 else exported_files[0]
|
||||||
|
|
||||||
|
import tempfile
|
||||||
|
import webbrowser
|
||||||
|
import base64
|
||||||
|
|
||||||
|
# Create temporary HTML file
|
||||||
|
with tempfile.NamedTemporaryFile(mode='w', suffix='.html', delete=False) as f:
|
||||||
|
# Convert images to base64 for embedding
|
||||||
|
img1_b64 = ""
|
||||||
|
img2_b64 = ""
|
||||||
|
try:
|
||||||
|
with open(exported_files[0], 'rb') as img_file:
|
||||||
|
img1_b64 = base64.b64encode(img_file.read()).decode()
|
||||||
|
with open(exported_files[1], 'rb') as img_file:
|
||||||
|
img2_b64 = base64.b64encode(img_file.read()).decode()
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Error reading images: {e}")
|
||||||
|
return self.interactive_choose_primary_overlay([], exported_files, conversation_id, file_id)
|
||||||
|
|
||||||
|
html_content = f"""
|
||||||
|
<!DOCTYPE html>
|
||||||
|
<html>
|
||||||
|
<head>
|
||||||
|
<title>BeReal Conversation Selector</title>
|
||||||
|
<style>
|
||||||
|
body {{ font-family: Arial, sans-serif; margin: 20px; background: #f0f0f0; }}
|
||||||
|
.container {{ max-width: 1200px; margin: 0 auto; background: white; padding: 20px; border-radius: 10px; }}
|
||||||
|
.header {{ text-align: center; margin-bottom: 30px; }}
|
||||||
|
.images {{ display: flex; gap: 20px; justify-content: center; margin-bottom: 30px; }}
|
||||||
|
.image-container {{ text-align: center; cursor: pointer; border: 3px solid #ddd; border-radius: 10px; padding: 10px; transition: all 0.3s; }}
|
||||||
|
.image-container:hover {{ border-color: #007bff; transform: scale(1.02); }}
|
||||||
|
.image-container.selected {{ border-color: #28a745; background: #f8fff8; }}
|
||||||
|
.image-container img {{ max-width: 400px; max-height: 400px; border-radius: 5px; }}
|
||||||
|
.buttons {{ text-align: center; }}
|
||||||
|
.btn {{ padding: 10px 20px; margin: 0 10px; border: none; border-radius: 5px; cursor: pointer; font-size: 16px; }}
|
||||||
|
.btn-primary {{ background: #007bff; color: white; }}
|
||||||
|
.btn-success {{ background: #28a745; color: white; }}
|
||||||
|
.btn-secondary {{ background: #6c757d; color: white; }}
|
||||||
|
.btn:hover {{ opacity: 0.8; }}
|
||||||
|
.instruction {{ text-align: center; margin-bottom: 20px; font-size: 18px; color: #333; }}
|
||||||
|
.result {{ display: none; text-align: center; font-size: 20px; margin-top: 20px; }}
|
||||||
|
</style>
|
||||||
|
</head>
|
||||||
|
<body>
|
||||||
|
<div class="container">
|
||||||
|
<div class="header">
|
||||||
|
<h1>BeReal Conversation Selector</h1>
|
||||||
|
<p>Conversation: {conversation_id} | Message ID: {file_id}</p>
|
||||||
|
<div id="progress-info" style="background: #e9ecef; padding: 10px; border-radius: 5px; margin: 10px 0;">
|
||||||
|
<small>{progress_info if progress_info else 'Processing conversation images...'}</small>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="instruction">
|
||||||
|
<strong>Click on the image that should be the SELFIE VIEW (front camera/overlay)</strong><br>
|
||||||
|
<small>Or press: <kbd>1</kbd> for left image, <kbd>2</kbd> for right image, <kbd>S</kbd> to skip</small>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="images">
|
||||||
|
<div class="image-container" id="img1" onclick="selectImage(1)">
|
||||||
|
<img src="data:image/webp;base64,{img1_b64}" alt="Image 1">
|
||||||
|
<h3>Image 1</h3>
|
||||||
|
<p>{os.path.basename(exported_files[0])}</p>
|
||||||
|
</div>
|
||||||
|
<div class="image-container" id="img2" onclick="selectImage(2)">
|
||||||
|
<img src="data:image/webp;base64,{img2_b64}" alt="Image 2">
|
||||||
|
<h3>Image 2</h3>
|
||||||
|
<p>{os.path.basename(exported_files[1])}</p>
|
||||||
|
</div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="buttons">
|
||||||
|
<button class="btn btn-secondary" onclick="skip()">Skip Composite</button>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<div class="result" id="result"></div>
|
||||||
|
</div>
|
||||||
|
|
||||||
|
<script>
|
||||||
|
let selectedImage = 0;
|
||||||
|
|
||||||
|
function selectImage(num) {{
|
||||||
|
// Immediately confirm selection
|
||||||
|
document.getElementById('img1').classList.remove('selected');
|
||||||
|
document.getElementById('img2').classList.remove('selected');
|
||||||
|
document.getElementById('img' + num).classList.add('selected');
|
||||||
|
|
||||||
|
// Write result to file immediately
|
||||||
|
writeResult(num.toString());
|
||||||
|
|
||||||
|
document.getElementById('result').innerHTML =
|
||||||
|
'<p style="color: green; font-size: 24px; font-weight: bold;">✓ Image ' + num + ' selected as SELFIE VIEW</p>' +
|
||||||
|
'<p style="color: #666;">Processing... You can close this window.</p>';
|
||||||
|
document.getElementById('result').style.display = 'block';
|
||||||
|
|
||||||
|
// Hide the interface
|
||||||
|
document.querySelector('.buttons').style.display = 'none';
|
||||||
|
document.querySelector('.instruction').style.display = 'none';
|
||||||
|
document.querySelector('.images').style.opacity = '0.5';
|
||||||
|
}}
|
||||||
|
|
||||||
|
function writeResult(value) {{
|
||||||
|
// Use a simple approach - create a temporary anchor to trigger download
|
||||||
|
const blob = new Blob([value], {{type: 'text/plain'}});
|
||||||
|
const url = URL.createObjectURL(blob);
|
||||||
|
const a = document.createElement('a');
|
||||||
|
a.href = url;
|
||||||
|
a.download = 'bereal_selection.txt';
|
||||||
|
a.style.display = 'none';
|
||||||
|
document.body.appendChild(a);
|
||||||
|
a.click();
|
||||||
|
document.body.removeChild(a);
|
||||||
|
URL.revokeObjectURL(url);
|
||||||
|
}}
|
||||||
|
|
||||||
|
// Add keyboard shortcuts
|
||||||
|
document.addEventListener('keydown', function(e) {{
|
||||||
|
if (e.key === '1') {{
|
||||||
|
selectImage(1);
|
||||||
|
}} else if (e.key === '2') {{
|
||||||
|
selectImage(2);
|
||||||
|
}} else if (e.key === 's' || e.key === 'S') {{
|
||||||
|
skip();
|
||||||
|
}}
|
||||||
|
}});
|
||||||
|
|
||||||
|
function skip() {{
|
||||||
|
writeResult('skip');
|
||||||
|
|
||||||
|
document.getElementById('result').innerHTML =
|
||||||
|
'<p style="color: orange; font-size: 24px; font-weight: bold;">⏭ Composite creation SKIPPED</p>' +
|
||||||
|
'<p style="color: #666;">Processing... You can close this window.</p>';
|
||||||
|
document.getElementById('result').style.display = 'block';
|
||||||
|
|
||||||
|
// Hide the interface
|
||||||
|
document.querySelector('.buttons').style.display = 'none';
|
||||||
|
document.querySelector('.instruction').style.display = 'none';
|
||||||
|
document.querySelector('.images').style.opacity = '0.5';
|
||||||
|
}}
|
||||||
|
</script>
|
||||||
|
</body>
|
||||||
|
</html>
|
||||||
|
"""
|
||||||
|
|
||||||
|
f.write(html_content)
|
||||||
|
html_path = f.name
|
||||||
|
|
||||||
|
# Open in browser
|
||||||
|
print(f"Opening web UI for conversation {conversation_id}, message {file_id}...")
|
||||||
|
webbrowser.open('file://' + html_path)
|
||||||
|
|
||||||
|
# Wait for user to make selection in browser
|
||||||
|
print("Make your selection in the web browser (click image or press 1/2/S)...")
|
||||||
|
|
||||||
|
import time
|
||||||
|
selection = None
|
||||||
|
timeout = 300 # 5 minutes timeout
|
||||||
|
start_time = time.time()
|
||||||
|
|
||||||
|
# Look for result file in Downloads folder or current directory
|
||||||
|
possible_result_files = [
|
||||||
|
os.path.expanduser("~/Downloads/bereal_selection.txt"),
|
||||||
|
"bereal_selection.txt",
|
||||||
|
os.path.expanduser("~/Downloads/bereal_selection*.txt")
|
||||||
|
]
|
||||||
|
|
||||||
|
while selection is None and (time.time() - start_time) < timeout:
|
||||||
|
try:
|
||||||
|
# Check for result files
|
||||||
|
for pattern in possible_result_files:
|
||||||
|
if '*' in pattern:
|
||||||
|
import glob
|
||||||
|
files = glob.glob(pattern)
|
||||||
|
if files:
|
||||||
|
result_file = max(files, key=os.path.getctime) # Get newest
|
||||||
|
else:
|
||||||
|
continue
|
||||||
|
else:
|
||||||
|
result_file = pattern
|
||||||
|
if not os.path.exists(result_file):
|
||||||
|
continue
|
||||||
|
|
||||||
|
# Read the result
|
||||||
|
try:
|
||||||
|
with open(result_file, 'r') as f:
|
||||||
|
result = f.read().strip()
|
||||||
|
if result == "1":
|
||||||
|
selection = 1
|
||||||
|
elif result == "2":
|
||||||
|
selection = 2
|
||||||
|
elif result == "skip":
|
||||||
|
selection = "skip"
|
||||||
|
|
||||||
|
# Clean up the result file
|
||||||
|
os.unlink(result_file)
|
||||||
|
break
|
||||||
|
except Exception:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if selection is not None:
|
||||||
|
break
|
||||||
|
|
||||||
|
# Small delay to avoid busy waiting
|
||||||
|
time.sleep(1)
|
||||||
|
|
||||||
|
except (KeyboardInterrupt, EOFError):
|
||||||
|
print("\nSkipping composite creation...")
|
||||||
|
selection = "skip"
|
||||||
|
break
|
||||||
|
|
||||||
|
if selection is None:
|
||||||
|
print("Timeout waiting for selection, skipping...")
|
||||||
|
selection = "skip"
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
try:
|
||||||
|
os.unlink(html_path)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
if selection == "skip":
|
||||||
|
return None, None
|
||||||
|
elif selection == 1:
|
||||||
|
return exported_files[1], exported_files[0] # img2 main, img1 selfie
|
||||||
|
elif selection == 2:
|
||||||
|
return exported_files[0], exported_files[1] # img1 main, img2 selfie
|
||||||
|
|
||||||
|
# Clean up
|
||||||
|
try:
|
||||||
|
os.unlink(html_path)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def detect_primary_overlay_conversation(self, original_files, exported_files):
|
||||||
|
"""
|
||||||
|
Tries to detect which image should be primary (back camera) vs overlay (front camera)
|
||||||
|
for conversation images based on filename patterns and image properties.
|
||||||
|
"""
|
||||||
|
if len(original_files) != 2 or len(exported_files) != 2:
|
||||||
|
return exported_files[0], exported_files[1]
|
||||||
|
|
||||||
|
# Get original filenames for pattern detection
|
||||||
|
file1_name = os.path.basename(original_files[0]).lower()
|
||||||
|
file2_name = os.path.basename(original_files[1]).lower()
|
||||||
|
|
||||||
|
# Pattern 1: Look for "secondary" keyword (usually front camera)
|
||||||
|
if "secondary" in file1_name and "secondary" not in file2_name:
|
||||||
|
# file1 is secondary (front), file2 is primary (back)
|
||||||
|
return exported_files[1], exported_files[0]
|
||||||
|
elif "secondary" in file2_name and "secondary" not in file1_name:
|
||||||
|
# file2 is secondary (front), file1 is primary (back)
|
||||||
|
return exported_files[0], exported_files[1]
|
||||||
|
|
||||||
|
# Pattern 2: Look for "front" vs "back" keywords
|
||||||
|
if "front" in file1_name and "back" in file2_name:
|
||||||
|
return exported_files[1], exported_files[0] # back primary, front overlay
|
||||||
|
elif "back" in file1_name and "front" in file2_name:
|
||||||
|
return exported_files[0], exported_files[1] # back primary, front overlay
|
||||||
|
|
||||||
|
# Pattern 3: Check image dimensions (front camera often different aspect ratio)
|
||||||
|
try:
|
||||||
|
from PIL import Image
|
||||||
|
img1 = Image.open(exported_files[0])
|
||||||
|
img2 = Image.open(exported_files[1])
|
||||||
|
|
||||||
|
# If one image is significantly smaller or different aspect ratio, it might be front camera
|
||||||
|
ratio1 = img1.width / img1.height
|
||||||
|
ratio2 = img2.width / img2.height
|
||||||
|
|
||||||
|
# If aspect ratios are very different, assume the more square one is front camera
|
||||||
|
if abs(ratio1 - ratio2) > 0.2:
|
||||||
|
if abs(ratio1 - 1.0) < abs(ratio2 - 1.0): # ratio1 is closer to square
|
||||||
|
return exported_files[1], exported_files[0] # img2 primary, img1 overlay
|
||||||
|
else:
|
||||||
|
return exported_files[0], exported_files[1] # img1 primary, img2 overlay
|
||||||
|
|
||||||
|
img1.close()
|
||||||
|
img2.close()
|
||||||
|
except Exception:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Pattern 4: Alphabetical order heuristic - often the first file alphabetically is the back camera
|
||||||
|
if file1_name < file2_name:
|
||||||
|
return exported_files[0], exported_files[1] # first alphabetically as primary
|
||||||
|
else:
|
||||||
|
return exported_files[1], exported_files[0] # second alphabetically as primary
|
||||||
|
|
||||||
|
    @staticmethod
    def get_img_filename(image: dict) -> str:
        """
        Returns the image filename from an image object (frontImage, backImage, primary, secondary).
        """
        return os.path.basename(image["path"])

    @staticmethod
    def get_datetime_from_str(time: str) -> dt:
        """
        Returns a datetime object from a time key.
        """
        formats = [
            "%Y-%m-%dT%H:%M:%S.%fZ",   # Fractional seconds (microseconds)
            "%Y-%m-%dT%H:%M:%S.000Z",  # Literal ".000" milliseconds
            "%Y-%m-%dT%H:%M:%SZ",      # No fractional seconds at all
        ]

        for format_string in formats:
            try:
                return dt.strptime(time, format_string)
            except ValueError:
                continue

        # Fall back to parsing a numeric Unix timestamp
        try:
            timestamp = float(time)
            return dt.fromtimestamp(timestamp)
        except (ValueError, TypeError):
            pass

        raise ValueError(f"Invalid datetime format: {time}")

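The accepted inputs, for reference (values are illustrative; BeRealExporter is the class these methods belong to, per the __main__ block below):

    BeRealExporter.get_datetime_from_str("2023-07-14T18:03:22.123456Z")  # fractional seconds
    BeRealExporter.get_datetime_from_str("2023-07-14T18:03:22.000Z")     # literal .000
    BeRealExporter.get_datetime_from_str("2023-07-14T18:03:22Z")         # no fraction
    BeRealExporter.get_datetime_from_str("1689357802")                   # numeric Unix timestamp
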
    def export_img(
        self, old_img_name: str, img_name: str, img_dt: dt, img_location=None
    ):
        self.verbose_msg(f"Exporting {old_img_name} to {img_name}")
        if img_location:
            self.verbose_msg(f"Location data available: {img_location['latitude']}, {img_location['longitude']}")
        else:
            self.verbose_msg(f"No location data for {img_name}")

        if not os.path.isfile(old_img_name):
            # Try different fallback locations
            fallback_locations = [
                # Direct path from bereal_path
                os.path.join(self.bereal_path, old_img_name.lstrip("/")),
                # Try with just the filename in different folders
                os.path.join(self.bereal_path, "Photos/post", os.path.basename(old_img_name)),
                os.path.join(self.bereal_path, "Photos/bereal", os.path.basename(old_img_name)),
                os.path.join(self.bereal_path, "Photos/realmoji", os.path.basename(old_img_name)),
                # Original fallback
                os.path.join(self.bereal_path, old_img_name),
            ]

            for fallback in fallback_locations:
                if os.path.isfile(fallback):
                    old_img_name = fallback
                    break
            else:
                print(f"File not found in expected locations: {old_img_name}")
                return

        os.makedirs(os.path.dirname(img_name), exist_ok=True)

        # Detect the actual file format and adjust the extension accordingly
        try:
            with Image.open(old_img_name) as img:
                actual_format = img.format.lower()
                self.verbose_msg(f"Detected format: {actual_format} for {old_img_name}")

                if actual_format == 'jpeg' and img_name.endswith('.webp'):
                    # Original is JPEG but we're naming it .webp - fix the extension
                    img_name = img_name.replace('.webp', '.jpg')
                    self.verbose_msg(f"Corrected extension to .jpg for JPEG file: {img_name}")
                elif actual_format == 'webp' and img_name.endswith('.jpg'):
                    # Original is WEBP but we're naming it .jpg - fix the extension
                    img_name = img_name.replace('.jpg', '.webp')
                    self.verbose_msg(f"Corrected extension to .webp for WEBP file: {img_name}")
        except Exception as e:
            self.verbose_msg(f"Could not detect format for {old_img_name}: {e}, using original extension")

        cp(old_img_name, img_name)

        # Convert to local time based on location
        local_dt = self.convert_to_local_time(img_dt, img_location)

        # Use appropriate tags based on the file format
        if img_name.endswith('.jpg') or img_name.endswith('.jpeg'):
            # JPEG supports full EXIF metadata
            tags = {
                "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
            }
            if img_location:
                self.verbose_msg(f"Adding GPS to JPEG {img_name}: {img_location['latitude']}, {img_location['longitude']}")
                tags.update({
                    "GPSLatitude": img_location["latitude"],
                    "GPSLongitude": img_location["longitude"],
                    "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                    "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                })
        else:
            # WEBP has limited EXIF support, so use minimal essential tags
            tags = {
                "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
            }
            # Add GPS data if available (WEBP supports basic GPS)
            if img_location:
                self.verbose_msg(f"Adding GPS to WEBP {img_name}: {img_location['latitude']}, {img_location['longitude']}")
                tags.update({
                    "GPSLatitude": img_location["latitude"],
                    "GPSLongitude": img_location["longitude"],
                    "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                    "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                })

        try:
            with (
                et(executable=self.exiftool_path) if self.exiftool_path else et()
            ) as exif_tool:
                result = exif_tool.set_tags(
                    img_name, tags=tags, params=["-overwrite_original", "-m", "-q"]
                )
                self.verbose_msg(f"ExifTool result: {result}")
                self.verbose_msg(f"Metadata added to {img_name} (local time: {local_dt.strftime('%Y-%m-%d %H:%M:%S')})")
        except Exception:
            # WEBP files often have limited EXIF support, so try again with fewer tags
            self.verbose_msg(f"Primary metadata write failed for {img_name}, trying fallback approach")
            try:
                # Try with just DateTimeOriginal, which is more widely supported
                fallback_tags = {"DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S")}
                if img_location:
                    fallback_tags.update({
                        "GPSLatitude": img_location["latitude"],
                        "GPSLongitude": img_location["longitude"],
                        "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                        "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                    })

                with (
                    et(executable=self.exiftool_path) if self.exiftool_path else et()
                ) as exif_tool:
                    exif_tool.set_tags(
                        img_name, tags=fallback_tags, params=["-overwrite_original", "-m", "-q"]
                    )
                self.verbose_msg(f"Fallback metadata added to {img_name}")
            except Exception:
                print(f"WEBP metadata failed for {img_name}, trying JPEG conversion...")
                # Convert to JPEG as the final fallback for reliable EXIF
                try:
                    jpeg_name = img_name.replace('.webp', '.jpg')
                    with Image.open(img_name) as img:
                        # Convert to RGB if necessary (JPEG doesn't support transparency)
                        if img.mode in ('RGBA', 'LA', 'P'):
                            rgb_img = Image.new('RGB', img.size, (255, 255, 255))
                            if img.mode == 'P':
                                img = img.convert('RGBA')
                            rgb_img.paste(img, mask=img.split()[-1] if img.mode in ('RGBA', 'LA') else None)
                            img = rgb_img
                        img.save(jpeg_name, 'JPEG', quality=95, optimize=True)

                    # Add EXIF to the JPEG (should work reliably)
                    jpeg_tags = {
                        "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                        "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                        "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                    }
                    if img_location:
                        jpeg_tags.update({
                            "GPSLatitude": img_location["latitude"],
                            "GPSLongitude": img_location["longitude"],
                            "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                            "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                        })

                    with (
                        et(executable=self.exiftool_path) if self.exiftool_path else et()
                    ) as exif_tool:
                        exif_tool.set_tags(
                            jpeg_name, tags=jpeg_tags, params=["-overwrite_original"]
                        )

                    # Remove the original WEBP file since the JPEG worked
                    os.remove(img_name)
                    self.verbose_msg(f"Converted to JPEG with full EXIF: {jpeg_name}")

                except Exception as e3:
                    print(f"JPEG conversion also failed for {img_name}: {e3}")
                    # Set the file modification time as an absolute last resort
                    try:
                        timestamp = local_dt.timestamp()
                        os.utime(img_name, (timestamp, timestamp))
                        self.verbose_msg(f"Set file modification time for {img_name}")
                    except Exception as e4:
                        print(f"Could not set any timestamp for {img_name}: {e4}")

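EXIF handling in WEBP is inconsistent across tools, which is why export_img degrades step by step: full tag set, then DateTimeOriginal only, then a JPEG re-encode, and finally plain file mtime. The core pyexiftool call it builds on, as a standalone sketch (the filename is hypothetical; `et` in this module is pyexiftool's ExifToolHelper):

    from exiftool import ExifToolHelper

    # Hypothetical file; tags and params mirror the primary write above.
    with ExifToolHelper() as tool:
        tool.set_tags(
            "photo.jpg",
            tags={"DateTimeOriginal": "2023:07:14 18:03:22"},
            params=["-overwrite_original", "-m", "-q"],
        )
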
    def create_rounded_mask(self, size, radius):
        """
        Creates a rounded-rectangle mask for the given size and radius, with anti-aliasing.
        """
        # Use supersampling for smoother edges (4x resolution)
        scale = 4
        large_size = (size[0] * scale, size[1] * scale)
        large_radius = radius * scale

        # Create the mask at the higher resolution
        mask = Image.new('L', large_size, 0)
        draw = ImageDraw.Draw(mask)
        draw.rounded_rectangle((0, 0, large_size[0], large_size[1]), radius=large_radius, fill=255)

        # Downsample with high-quality resampling for anti-aliasing
        mask = mask.resize(size, Image.Resampling.LANCZOS)
        return mask

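A quick sketch of applying the mask (the paths and the exporter instance are assumptions). Drawing the rounded rectangle at 4x and downsampling with LANCZOS is what smooths the corner edges compared to drawing at native size:

    from PIL import Image

    img = Image.open("front.webp").convert("RGBA")  # hypothetical input
    mask = exporter.create_rounded_mask(img.size, radius=min(img.size) // 10)
    img.putalpha(mask)  # corners become transparent with anti-aliased edges
    img.save("front_rounded.png")
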
    def create_composite_image(self, primary_path: str, secondary_path: str, output_path: str, img_dt: dt = None, img_location=None):
        """
        Creates a composite image with the secondary image overlaid on the primary image,
        with padding from the top and left edges and rounded corners.
        Applies the same metadata as the source images.
        """
        try:
            # Open both images
            primary = Image.open(primary_path)
            secondary = Image.open(secondary_path)

            # Calculate the secondary image size (about 1/4 of the primary width)
            secondary_width = primary.width // 4
            secondary_height = int(secondary.height * (secondary_width / secondary.width))

            # Resize the secondary image
            secondary_resized = secondary.resize((secondary_width, secondary_height), Image.Resampling.LANCZOS)

            # Create rounded corners for the secondary image
            corner_radius = min(secondary_width, secondary_height) // 10  # 10% of the smaller dimension
            border_width = 4

            # Create the secondary image with a border
            bordered_width = secondary_width + (border_width * 2)
            bordered_height = secondary_height + (border_width * 2)

            # Create a black background for the border
            bordered_image = Image.new('RGBA', (bordered_width, bordered_height), (0, 0, 0, 255))

            # Create a mask with rounded corners for the bordered image
            border_mask = self.create_rounded_mask((bordered_width, bordered_height), corner_radius + border_width)

            # Apply the border mask
            bordered_image.putalpha(border_mask)

            # Create a mask with rounded corners for the inner image
            inner_mask = self.create_rounded_mask((secondary_width, secondary_height), corner_radius)

            # Apply the mask to create rounded corners on the secondary image
            secondary_with_alpha = Image.new('RGBA', (secondary_width, secondary_height), (0, 0, 0, 0))
            secondary_rgba = secondary_resized.convert('RGBA')
            secondary_with_alpha.paste(secondary_rgba, (0, 0))
            secondary_with_alpha.putalpha(inner_mask)

            # Paste the secondary image onto the bordered background
            bordered_image.paste(secondary_with_alpha, (border_width, border_width), secondary_with_alpha)

            # Create a copy of the primary image and convert to RGBA for proper alpha blending
            composite = primary.convert('RGBA')

            # Add padding (20 pixels from top and left)
            padding = 20

            # Paste the bordered secondary image onto the primary with padding
            composite.paste(bordered_image, (padding, padding), bordered_image)

            # Convert back to RGB for saving as WEBP
            final_composite = Image.new('RGB', composite.size, (255, 255, 255))
            final_composite.paste(composite, mask=composite.split()[-1] if composite.mode == 'RGBA' else None)

            # Save the composite image
            final_composite.save(output_path, "WEBP", quality=95)

            # Apply metadata to the composite if a datetime is provided
            if img_dt:
                # Convert to local time based on location
                local_dt = self.convert_to_local_time(img_dt, img_location)

                tags = {
                    "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                    "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                    "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                }

                if img_location:
                    tags.update(
                        {
                            "GPSLatitude": img_location["latitude"],
                            "GPSLongitude": img_location["longitude"],
                            "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                            "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                        }
                    )

                try:
                    with (
                        et(executable=self.exiftool_path) if self.exiftool_path else et()
                    ) as exif_tool:
                        exif_tool.set_tags(
                            output_path, tags=tags, params=["-P", "-overwrite_original", "-m"]
                        )
                    self.verbose_msg(f"Metadata added to composite: {output_path}")
                except Exception:
                    # Try the fallback approach for the composite
                    try:
                        fallback_tags = {"DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S")}
                        if img_location:
                            fallback_tags.update({
                                "GPSLatitude": img_location["latitude"],
                                "GPSLongitude": img_location["longitude"],
                            })

                        with (
                            et(executable=self.exiftool_path) if self.exiftool_path else et()
                        ) as exif_tool:
                            exif_tool.set_tags(
                                output_path, tags=fallback_tags, params=["-overwrite_original", "-m", "-q"]
                            )
                        self.verbose_msg(f"Fallback metadata added to composite: {output_path}")
                    except Exception:
                        print(f"WEBP metadata failed for composite {output_path}, trying JPEG conversion...")
                        # Convert the composite to JPEG as a fallback
                        try:
                            jpeg_path = output_path.replace('.webp', '.jpg')
                            with Image.open(output_path) as img:
                                if img.mode in ('RGBA', 'LA', 'P'):
                                    rgb_img = Image.new('RGB', img.size, (255, 255, 255))
                                    if img.mode == 'P':
                                        img = img.convert('RGBA')
                                    rgb_img.paste(img, mask=img.split()[-1] if img.mode in ('RGBA', 'LA') else None)
                                    img = rgb_img
                                img.save(jpeg_path, 'JPEG', quality=95, optimize=True)

                            # Add full EXIF to the JPEG
                            jpeg_tags = {
                                "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                                "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                                "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                            }
                            if img_location:
                                jpeg_tags.update({
                                    "GPSLatitude": img_location["latitude"],
                                    "GPSLongitude": img_location["longitude"],
                                    "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                                    "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                                })

                            with (
                                et(executable=self.exiftool_path) if self.exiftool_path else et()
                            ) as exif_tool:
                                exif_tool.set_tags(
                                    jpeg_path, tags=jpeg_tags, params=["-overwrite_original"]
                                )

                            os.remove(output_path)  # Remove the WEBP since the JPEG worked
                            self.verbose_msg(f"Converted composite to JPEG with full EXIF: {jpeg_path}")

                        except Exception:
                            # Set the file modification time as an absolute last resort
                            try:
                                timestamp = local_dt.timestamp()
                                os.utime(output_path, (timestamp, timestamp))
                                self.verbose_msg(f"Set file modification time for composite: {output_path}")
                            except Exception:
                                pass

            self.verbose_msg(f"Created composite image with rounded corners: {output_path}")

        except Exception as e:
            print(f"Error creating composite image: {e}")
            # Fallback to just copying the primary image WITH METADATA
            cp(primary_path, output_path)

            # Apply metadata to the fallback copy if a datetime is provided
            if img_dt:
                # Convert to local time based on location
                local_dt = self.convert_to_local_time(img_dt, img_location)

                tags = {
                    "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                    "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                    "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                }

                if img_location:
                    tags.update(
                        {
                            "GPSLatitude": img_location["latitude"],
                            "GPSLongitude": img_location["longitude"],
                            "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                            "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                        }
                    )

                try:
                    with (
                        et(executable=self.exiftool_path) if self.exiftool_path else et()
                    ) as exif_tool:
                        exif_tool.set_tags(
                            output_path, tags=tags, params=["-P", "-overwrite_original", "-m"]
                        )
                    self.verbose_msg(f"Metadata added to fallback composite: {output_path}")
                except Exception:
                    # Try the fallback approach for the fallback composite
                    try:
                        fallback_tags = {"DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S")}
                        if img_location:
                            fallback_tags.update({
                                "GPSLatitude": img_location["latitude"],
                                "GPSLongitude": img_location["longitude"],
                            })

                        with (
                            et(executable=self.exiftool_path) if self.exiftool_path else et()
                        ) as exif_tool:
                            exif_tool.set_tags(
                                output_path, tags=fallback_tags, params=["-overwrite_original", "-m", "-q"]
                            )
                        self.verbose_msg(f"Fallback metadata added to fallback composite: {output_path}")
                    except Exception:
                        print(f"WEBP metadata failed for fallback composite {output_path}, trying JPEG conversion...")
                        # Convert the fallback composite to JPEG
                        try:
                            jpeg_path = output_path.replace('.webp', '.jpg')
                            with Image.open(output_path) as img:
                                if img.mode in ('RGBA', 'LA', 'P'):
                                    rgb_img = Image.new('RGB', img.size, (255, 255, 255))
                                    if img.mode == 'P':
                                        img = img.convert('RGBA')
                                    rgb_img.paste(img, mask=img.split()[-1] if img.mode in ('RGBA', 'LA') else None)
                                    img = rgb_img
                                img.save(jpeg_path, 'JPEG', quality=95, optimize=True)

                            # Add full EXIF to the JPEG
                            jpeg_tags = {
                                "DateTimeOriginal": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                                "CreateDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                                "ModifyDate": local_dt.strftime("%Y:%m:%d %H:%M:%S"),
                            }
                            if img_location:
                                jpeg_tags.update({
                                    "GPSLatitude": img_location["latitude"],
                                    "GPSLongitude": img_location["longitude"],
                                    "GPSLatitudeRef": "N" if img_location["latitude"] >= 0 else "S",
                                    "GPSLongitudeRef": "E" if img_location["longitude"] >= 0 else "W",
                                })

                            with (
                                et(executable=self.exiftool_path) if self.exiftool_path else et()
                            ) as exif_tool:
                                exif_tool.set_tags(
                                    jpeg_path, tags=jpeg_tags, params=["-overwrite_original"]
                                )

                            os.remove(output_path)  # Remove the WEBP since the JPEG worked
                            self.verbose_msg(f"Converted fallback composite to JPEG with full EXIF: {jpeg_path}")

                        except Exception:
                            # Set the file modification time as an absolute last resort
                            try:
                                timestamp = local_dt.timestamp()
                                os.utime(output_path, (timestamp, timestamp))
                                self.verbose_msg(f"Set file modification time for fallback composite: {output_path}")
                            except Exception:
                                pass

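A sketch of a direct call, with hypothetical paths and a location dict shaped like the one memories.json provides:

    from datetime import datetime

    exporter.create_composite_image(
        "out/back.webp",       # primary (background)
        "out/front.webp",      # secondary (overlaid top-left with border and rounded corners)
        "out/composited.webp",
        img_dt=datetime(2023, 7, 14, 18, 3, 22),
        img_location={"latitude": 40.7128, "longitude": -74.0060},
    )
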
    def export_memories(self, memories: list):
        """
        Exports all memories to the posts folder to avoid duplicates.

        MEMORIES vs POSTS:
        - Often contain the same images with different metadata formats
        - Memories: frontImage/backImage, takenTime, berealMoment, location data
        - Posts: primary/secondary, takenAt, limited metadata
        - Combined into the posts folder to avoid duplication

        Creates composite images with backImage as primary and frontImage overlaid (BeReal style).
        Uses parallel processing for faster execution.
        """
        out_path_memories = os.path.join(self.out_path, "posts")  # Use the posts folder
        os.makedirs(out_path_memories, exist_ok=True)

        # Filter memories within the time span first
        valid_memories = []
        for memory in memories:
            memory_dt = self.get_datetime_from_str(memory["takenTime"])
            if self.time_span[0] <= memory_dt <= self.time_span[1]:
                valid_memories.append(memory)

        if not valid_memories:
            self.verbose_msg("No memories found in the specified time range")
            return

        self.verbose_msg(f"Processing {len(valid_memories)} memories with {self.max_workers} workers (saving to posts folder)...")

        # Process memories in parallel with a progress bar
        with logging_redirect_tqdm() if self.verbose else tqdm(disable=False):
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Submit all tasks
                future_to_memory = {
                    executor.submit(self.process_memory, memory, out_path_memories): i
                    for i, memory in enumerate(valid_memories, 1)
                }

                # Process completed tasks with a progress bar
                with tqdm(total=len(valid_memories), desc="Exporting memories", unit="memory",
                          leave=True, position=0) as pbar:
                    for future in as_completed(future_to_memory):
                        memory_index = future_to_memory[future]
                        try:
                            result = future.result()
                            if result:
                                pbar.set_postfix_str(f"Latest: {result}")
                            pbar.update(1)
                        except Exception as e:
                            tqdm.write(f"Error processing memory {memory_index}: {e}")
                            pbar.update(1)

        self.verbose_msg(f"Completed exporting {len(valid_memories)} memories")

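Illustrative entries (not from a real export) showing the two shapes the docstring contrasts; the key names match what this class reads:

    memory = {  # memories.json entry (illustrative)
        "frontImage": {"path": "/Photos/post/abc_front.webp"},
        "backImage": {"path": "/Photos/post/abc_back.webp"},
        "takenTime": "2023-07-14T18:03:22.000Z",
        "location": {"latitude": 40.7128, "longitude": -74.0060},
    }

    post = {  # posts.json entry (illustrative)
        "primary": {"path": "/Photos/post/abc_back.webp"},
        "secondary": {"path": "/Photos/post/abc_front.webp"},
        "takenAt": "2023-07-14T18:03:22Z",
    }
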
    def export_realmojis(self, realmojis: list):
        """
        Exports all realmojis from the Photos directory to the corresponding output folder.
        """
        out_path_realmojis = os.path.join(self.out_path, "realmojis")
        os.makedirs(out_path_realmojis, exist_ok=True)

        # Filter realmojis within the time span first
        valid_realmojis = []
        for realmoji in realmojis:
            realmoji_dt = self.get_datetime_from_str(realmoji["postedAt"])
            if self.time_span[0] <= realmoji_dt <= self.time_span[1]:
                valid_realmojis.append((realmoji, realmoji_dt))

        if not valid_realmojis:
            self.verbose_msg("No realmojis found in the specified time range")
            return

        # Process with a progress bar
        with logging_redirect_tqdm() if self.verbose else tqdm(disable=False):
            with tqdm(valid_realmojis, desc="Exporting realmojis", unit="realmoji",
                      leave=True, position=0) as pbar:
                for realmoji, realmoji_dt in pbar:
                    # Convert to local time for the filename (to match the EXIF metadata)
                    local_dt = self.convert_to_local_time(realmoji_dt, None)

                    img_name = (
                        f"{out_path_realmojis}/{local_dt.strftime('%Y-%m-%d_%H-%M-%S')}.webp"
                    )
                    old_img_name = os.path.join(
                        self.bereal_path,
                        realmoji["media"]["path"],
                    )
                    self.export_img(old_img_name, img_name, realmoji_dt, None)
                    pbar.set_postfix_str(f"Latest: {local_dt.strftime('%Y-%m-%d_%H-%M-%S')}")

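An illustrative realmojis.json entry, matching the keys read above:

    realmoji = {  # illustrative values
        "postedAt": "2023-07-14T18:05:10.000Z",
        "media": {"path": "Photos/realmoji/xyz.webp"},
    }
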
    def export_posts(self, posts: list):
        """
        Exports all posts from the Photos directory to the corresponding output folder.

        POSTS vs MEMORIES:
        - Posts: Older BeReal format with basic metadata (single timestamp, less location data)
        - Memories: More recent format with rich metadata (location, multiple timestamps)
        - Posts have: primary/secondary images, takenAt timestamp, limited metadata
        - Memories have: frontImage/backImage, takenTime/berealMoment, location data

        Creates composite images with primary as background and secondary overlaid (BeReal style).
        Uses parallel processing for faster execution.
        """
        out_path_posts = os.path.join(self.out_path, "posts")
        os.makedirs(out_path_posts, exist_ok=True)

        # Filter posts within the time span first
        valid_posts = []
        for post in posts:
            post_dt = self.get_datetime_from_str(post["takenAt"])
            if self.time_span[0] <= post_dt <= self.time_span[1]:
                valid_posts.append(post)

        if not valid_posts:
            self.verbose_msg("No posts found in the specified time range")
            return

        self.verbose_msg(f"Processing {len(valid_posts)} posts with {self.max_workers} workers...")

        # Process posts in parallel with a progress bar
        with logging_redirect_tqdm() if self.verbose else tqdm(disable=False):
            with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
                # Submit all tasks
                future_to_post = {
                    executor.submit(self.process_post, post, out_path_posts): i
                    for i, post in enumerate(valid_posts, 1)
                }

                # Process completed tasks with a progress bar
                with tqdm(total=len(valid_posts), desc="Exporting posts", unit="post",
                          leave=True, position=0) as pbar:
                    for future in as_completed(future_to_post):
                        post_index = future_to_post[future]
                        try:
                            result = future.result()
                            if result:
                                pbar.set_postfix_str(f"Latest: {result}")
                            pbar.update(1)
                        except Exception as e:
                            tqdm.write(f"Error processing post {post_index}: {e}")
                            pbar.update(1)

        self.verbose_msg(f"Completed exporting {len(valid_posts)} posts")

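export_memories and export_posts share the same submit-then-drain concurrency pattern; distilled into a standalone sketch (the work function is a hypothetical stand-in for process_memory/process_post):

    from concurrent.futures import ThreadPoolExecutor, as_completed
    from tqdm import tqdm

    def process(item):
        # hypothetical stand-in for self.process_memory / self.process_post
        return f"item-{item}"

    items = list(range(8))
    with ThreadPoolExecutor(max_workers=4) as executor:
        futures = {executor.submit(process, it): it for it in items}
        with tqdm(total=len(items), desc="Exporting", unit="item") as pbar:
            for future in as_completed(futures):
                result = future.result()
                if result:
                    pbar.set_postfix_str(f"Latest: {result}")
                pbar.update(1)
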
    def export_conversations(self):
        """
        Exports all conversation images from the conversations directory.
        Groups images by number prefix and creates composite images when pairs exist.
        """
        conversations_path = os.path.join(self.bereal_path, "conversations")
        if not os.path.exists(conversations_path):
            self.verbose_msg("No conversations folder found")
            return

        out_path_conversations = os.path.join(self.out_path, "conversations")
        os.makedirs(out_path_conversations, exist_ok=True)

        # Get all conversation folders
        conversation_folders = [f for f in os.listdir(conversations_path)
                                if os.path.isdir(os.path.join(conversations_path, f))]

        # Count the total interactive pairs if in interactive mode
        total_interactive_pairs = 0
        if self.interactive_conversations:
            for conversation_id in conversation_folders:
                conversation_folder = os.path.join(conversations_path, conversation_id)
                image_files = glob.glob(os.path.join(conversation_folder, "*.webp"))

                # Quick grouping to count pairs
                temp_groups = {}
                for image_file in image_files:
                    filename = os.path.basename(image_file)
                    try:
                        file_id = filename.split('-')[0]
                        if file_id not in temp_groups:
                            temp_groups[file_id] = []
                        temp_groups[file_id].append(image_file)
                    except (ValueError, IndexError):
                        pass

                # Count pairs (groups with exactly 2 images)
                for file_id, files in temp_groups.items():
                    if len(files) == 2:
                        total_interactive_pairs += 1

        with logging_redirect_tqdm() if self.verbose else tqdm(disable=False):
            # Create the main progress bar
            main_pbar = tqdm(conversation_folders, desc="Exporting conversations", unit="conversation",
                             leave=True, position=0)

            # Create an interactive progress bar if needed
            if self.interactive_conversations and total_interactive_pairs > 0:
                interactive_pbar = tqdm(total=total_interactive_pairs,
                                        desc="Interactive selections", unit="pair",
                                        leave=True, position=1)
                interactive_count = 0
            else:
                interactive_pbar = None
                interactive_count = 0

            for conversation_id in main_pbar:
                conversation_folder = os.path.join(conversations_path, conversation_id)
                out_conversation_folder = os.path.join(out_path_conversations, conversation_id)
                os.makedirs(out_conversation_folder, exist_ok=True)

                # Get all image files in the conversation
                image_files = glob.glob(os.path.join(conversation_folder, "*.webp"))

                # Check for a chat log to get timestamps and user info
                chat_log_path = os.path.join(conversation_folder, "chat_log.json")
                chat_log = []
                chat_log_by_id = {}
                if os.path.exists(chat_log_path):
                    try:
                        with open(chat_log_path, 'r', encoding='utf-8') as f:
                            chat_log_data = json.load(f)
                        self.verbose_msg(f"Chat log structure: {type(chat_log_data)}")

                        # Handle the actual structure: {"conversationId": "...", "messages": [{"id": "7", "userId": "...", "createdAt": "..."}]}
                        if isinstance(chat_log_data, dict) and "messages" in chat_log_data:
                            messages = chat_log_data["messages"]
                            self.verbose_msg(f"Found {len(messages)} messages in chat log")

                            for message in messages:
                                if isinstance(message, dict) and "id" in message:
                                    message_id = message["id"]
                                    chat_log_by_id[message_id] = message
                                    chat_log.append(message)
                                    self.verbose_msg(f"Added message ID {message_id}: {message.get('createdAt', 'no timestamp')}")

                        elif isinstance(chat_log_data, list):
                            # Fallback: an array of entries
                            chat_log = chat_log_data
                            for entry in chat_log:
                                if isinstance(entry, dict) and "id" in entry:
                                    chat_log_by_id[entry["id"]] = entry

                        self.verbose_msg(f"Loaded {len(chat_log_by_id)} chat log entries")
                        if chat_log_by_id:
                            sample_key = list(chat_log_by_id.keys())[0]
                            self.verbose_msg(f"Sample entry: ID {sample_key} (type: {type(sample_key)}) -> {chat_log_by_id[sample_key]}")
                            self.verbose_msg(f"All chat log IDs: {list(chat_log_by_id.keys())}")

                    except Exception as e:
                        self.verbose_msg(f"Could not read chat log: {e}")
                        import traceback
                        self.verbose_msg(f"Full error: {traceback.format_exc()}")

                # Group images by their ID prefix (matches the chat_log.json id field)
                image_groups = {}
                for image_file in image_files:
                    filename = os.path.basename(image_file)
                    try:
                        # Extract the ID from a filename like "7-gchAVq_kc0wAbj_tMMC3D.webp" -> "7"
                        file_id = filename.split('-')[0]
                        if file_id not in image_groups:
                            image_groups[file_id] = []
                        image_groups[file_id].append(image_file)
                        self.verbose_msg(f"Found image with ID {file_id}: {filename}")
                    except (ValueError, IndexError):
                        # Handle files without an ID prefix
                        if 'misc' not in image_groups:
                            image_groups['misc'] = []
                        image_groups['misc'].append(image_file)
                        self.verbose_msg(f"Image without ID prefix: {filename}")

                # Sort files within each group to ensure consistent ordering
                for file_id in image_groups:
                    image_groups[file_id].sort()

                # Debug: show all groups found
                self.verbose_msg(f"Found {len(image_groups)} image groups:")
                for file_id, files in image_groups.items():
                    self.verbose_msg(f"  Group {file_id}: {len(files)} files - {[os.path.basename(f) for f in files]}")

                # Process each group
                for file_id, group_files in image_groups.items():
                    # Try to extract timestamp and user info from the chat log using the file ID
                    img_dt = None
                    user_id = None

                    try:
                        self.verbose_msg(f"Looking for ID '{file_id}' (type: {type(file_id)}) in chat log...")
                        self.verbose_msg(f"Available IDs in chat log: {list(chat_log_by_id.keys()) if len(chat_log_by_id) < 20 else list(chat_log_by_id.keys())[:20]}")

                        # Try different ID formats (string vs int)
                        found_entry = None
                        if file_id in chat_log_by_id:
                            found_entry = chat_log_by_id[file_id]
                        elif str(file_id) in chat_log_by_id:
                            found_entry = chat_log_by_id[str(file_id)]
                        elif file_id.isdigit() and int(file_id) in chat_log_by_id:
                            found_entry = chat_log_by_id[int(file_id)]

                        if found_entry:
                            img_dt = self.get_datetime_from_str(found_entry.get('createdAt', ''))
                            user_id = found_entry.get('userId', 'unknown')
                            self.verbose_msg(f"✓ Found chat log entry for ID {file_id}: {found_entry.get('createdAt')} by user {user_id[:8]}...")
                        else:
                            # Use the modification time of the first file in the group
                            img_dt = dt.fromtimestamp(os.path.getmtime(group_files[0]))
                            self.verbose_msg(f"✗ No chat log entry for ID {file_id}, using file modification time")
                            self.verbose_msg(f"✗ Tried looking for: '{file_id}', '{str(file_id)}', {int(file_id) if file_id.isdigit() else 'N/A'}")
                    except (ValueError, KeyError) as e:
                        img_dt = dt.fromtimestamp(os.path.getmtime(group_files[0]))
                        self.verbose_msg(f"✗ Error parsing chat log for ID {file_id}: {e}, using file modification time")

                    # Check whether the group falls within the time span
                    if not (self.time_span[0] <= img_dt <= self.time_span[1]):
                        continue

                    # Convert to local time for the filename (to match the EXIF metadata)
                    local_dt = self.convert_to_local_time(img_dt, None)

                    # Export individual images with user info
                    exported_files = []
                    for i, image_file in enumerate(group_files):
                        filename = os.path.basename(image_file)
                        # Include the user ID in the filename if available
                        user_suffix = f"_user_{user_id[:8]}" if user_id and user_id != 'unknown' else ""
                        base_name = os.path.splitext(filename)[0]
                        output_filename = f"{local_dt.strftime('%Y-%m-%d_%H-%M-%S')}_id{file_id}_{i+1}{user_suffix}_{base_name}.webp"
                        output_path = os.path.join(out_conversation_folder, output_filename)

                        self.export_img(image_file, output_path, img_dt, None)
                        if os.path.exists(output_path):
                            exported_files.append(output_path)

                    # Create a composite if we have exactly 2 images
                    if len(exported_files) == 2:
                        user_suffix = f"_user_{user_id[:8]}" if user_id and user_id != 'unknown' else ""
                        composite_filename = f"{local_dt.strftime('%Y-%m-%d_%H-%M-%S')}_id{file_id}{user_suffix}_composited.webp"
                        composite_path = os.path.join(out_conversation_folder, composite_filename)

                        # Choose the detection method based on the interactive mode
                        if self.web_ui and self.interactive_conversations:
                            # Update the interactive progress
                            if interactive_pbar:
                                interactive_pbar.set_description(f"Web UI: {conversation_id} msg {file_id}")

                            # Create progress info
                            progress_info = f"Interactive pair {interactive_count + 1} of {total_interactive_pairs}" if interactive_pbar else None

                            primary_img, overlay_img = self.web_ui_choose_primary_overlay(
                                exported_files, conversation_id, file_id, progress_info
                            )

                            # Update the progress after selection
                            interactive_count += 1
                            if interactive_pbar:
                                interactive_pbar.update(1)
                                interactive_pbar.set_description("Interactive selections")

                        elif self.interactive_conversations:
                            # Update the interactive progress
                            if interactive_pbar:
                                interactive_pbar.set_description(f"CLI: {conversation_id} msg {file_id}")

                            # Create progress info
                            progress_info = f"Interactive pair {interactive_count + 1} of {total_interactive_pairs}" if interactive_pbar else None

                            primary_img, overlay_img = self.interactive_choose_primary_overlay(
                                group_files, exported_files, conversation_id, file_id, progress_info
                            )

                            # Update the progress after selection
                            interactive_count += 1
                            if interactive_pbar:
                                interactive_pbar.update(1)
                                interactive_pbar.set_description("Interactive selections")

                        else:
                            primary_img, overlay_img = self.detect_primary_overlay_conversation(group_files, exported_files)

                        # Create the composite if the user didn't skip
                        if primary_img and overlay_img:
                            self.create_composite_image(primary_img, overlay_img, composite_path, img_dt, None)
                            self.verbose_msg(f"Created composite for conversation ID {file_id} by user {user_id[:8] if user_id else 'unknown'}")
                        else:
                            self.verbose_msg(f"Skipped composite for conversation ID {file_id}")

                main_pbar.set_postfix_str(f"Latest: {conversation_id}")
                self.verbose_msg(f"Exported conversation: {conversation_id}")

            # Close the interactive progress bar
            if interactive_pbar:
                interactive_pbar.close()

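The chat_log.json shape the parser above expects, as an illustrative example (the inline comment in the code documents the same structure); a filename like "7-gchAVq_kc0wAbj_tMMC3D.webp" groups under the message id "7":

    chat_log = {  # illustrative chat_log.json
        "conversationId": "abc123",
        "messages": [
            {"id": "7", "userId": "5f2d9e01...", "createdAt": "2023-07-14T18:03:22.000Z"},
        ],
    }
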
if __name__ == "__main__":
    args = init_parser()

    try:
        exporter = BeRealExporter(args)
        print(f"Found BeReal export at: {exporter.bereal_path}")
    except FileNotFoundError as e:
        print(f"Error: {e}")
        exit(1)

    if args.memories:
        try:
            memories_path = os.path.join(exporter.bereal_path, "memories.json")
            if os.path.exists(memories_path):
                with open(memories_path, encoding="utf-8") as f:
                    memories = json.load(f)
                exporter.export_memories(memories)
            else:
                print("memories.json file not found, skipping memories export.")
        except json.JSONDecodeError:
            print("Error decoding memories.json file.")

    if args.posts:
        try:
            posts_path = os.path.join(exporter.bereal_path, "posts.json")
            if os.path.exists(posts_path):
                with open(posts_path, encoding="utf-8") as f:
                    posts = json.load(f)
                exporter.export_posts(posts)
            else:
                print("posts.json file not found, skipping posts export.")
        except json.JSONDecodeError:
            print("Error decoding posts.json file.")

    if args.realmojis:
        try:
            realmojis_path = os.path.join(exporter.bereal_path, "realmojis.json")
            if os.path.exists(realmojis_path):
                with open(realmojis_path, encoding="utf-8") as f:
                    realmojis = json.load(f)
                exporter.export_realmojis(realmojis)
            else:
                print("realmojis.json file not found, skipping realmojis export.")
        except json.JSONDecodeError:
            print("Error decoding realmojis.json file.")

    if args.conversations:
        exporter.export_conversations()

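The same flow can also be driven programmatically; a minimal sketch using this module's own entry points, mirroring the block above:

    args = init_parser()               # parses CLI flags: memories, posts, realmojis, conversations, ...
    exporter = BeRealExporter(args)    # locates the export, may raise FileNotFoundError
    if args.conversations:
        exporter.export_conversations()
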
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
-Pillow
-argparse
-pytz
-timezonefinder
-pyexiftool
+pyexiftool==0.5.6
+Pillow>=9.0.0
+pytz>=2023.3
+timezonefinder>=6.2.0
+tqdm>=4.64.0