Merge pull request #607 from vrslev/run-pre-commit
chore: Run pre-commit
Commit 3171f212ab
.github/scripts/get-latest-tags.sh (vendored) — 6 changed lines

@@ -4,9 +4,9 @@ set -e
 set -x

 get_tag() {
     tags=$(git ls-remote --refs --tags --sort='v:refname' "https://github.com/$1" "v$2.*")
     tag=$(echo "$tags" | tail -n1 | sed 's/.*\///')
     echo "$tag"
 }

 FRAPPE_VERSION=$(get_tag frappe/frappe "$VERSION")
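Not part of the commit, but for context: run by hand, the `get_tag` pipeline above resolves the highest matching tag for a given major version. A minimal sketch, assuming `VERSION=13` and network access to GitHub:

```shell
# Sketch only: reproduces what get_tag does for frappe/frappe with VERSION=13 (assumed value).
VERSION=13
tags=$(git ls-remote --refs --tags --sort='v:refname' "https://github.com/frappe/frappe" "v$VERSION.*")
echo "$tags" | tail -n1 | sed 's/.*\///'  # prints the newest v13.x tag
```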
.github/workflows/greetings.yml (vendored) — 14 changed lines

@@ -6,10 +6,10 @@ jobs:
   greeting:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/first-interaction@v1
         with:
           repo-token: ${{ secrets.GITHUB_TOKEN }}
           issue-message: |
             Hello! We're very happy to see your first issue. If your issue is about a problem, go back and check you have copy-pasted all the debug logs you can so we can help you as fast as possible!
           pr-message: |
             Hello! Thank you about this PR. Since this is your first PR, please make sure you have described the improvements and your code is well documented.
@@ -37,7 +37,7 @@ repos:
     rev: v2.1.0
     hooks:
       - id: codespell
-        exclude: ".*Dockerfile.*"
+        exclude: "build/bench/Dockerfile"

   - repo: local
     hooks:
.vscode/extensions.json (vendored) — 16 changed lines

@@ -1,13 +1,9 @@
 {
   // See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations.
   // Extension identifier format: ${publisher}.${name}. Example: vscode.csharp

   // List of extensions which should be recommended for users of this workspace.
-  "recommendations": [
-    "ms-vscode-remote.remote-containers"
-  ],
+  "recommendations": ["ms-vscode-remote.remote-containers"],
   // List of extensions recommended by VS Code that should not be recommended for users of this workspace.
-  "unwantedRecommendations": [
-
-  ]
+  "unwantedRecommendations": []
 }
@@ -14,22 +14,22 @@ appearance, race, religion, or sexual identity and orientation.
 Examples of behavior that contributes to creating a positive environment
 include:

-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
+- Using welcoming and inclusive language
+- Being respectful of differing viewpoints and experiences
+- Gracefully accepting constructive criticism
+- Focusing on what is best for the community
+- Showing empathy towards other community members

 Examples of unacceptable behavior by participants include:

-* The use of sexualized language or imagery and unwelcome sexual attention or
+- The use of sexualized language or imagery and unwelcome sexual attention or
   advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
+- Trolling, insulting/derogatory comments, and personal or political attacks
+- Public or private harassment
+- Publishing others' private information, such as a physical or electronic
   address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
+- Other conduct which could reasonably be considered inappropriate in a
   professional setting

 ## Our Responsibilities

@@ -7,9 +7,8 @@ Before publishing a PR, please test builds locally:
 - with VSCode for testing environments (only for frappe/bench image).

 On each PR that contains changes relevant to Docker builds, images are being built and tested in our CI (GitHub Actions).
-> :evergreen_tree: Please be considerate when pushing commits and opening PR for multiple branches, as the process of building images uses energy and contributes to global warming.
->

+> :evergreen_tree: Please be considerate when pushing commits and opening PR for multiple branches, as the process of building images uses energy and contributes to global warming.

 ## Lint

@@ -38,7 +37,6 @@ To run all the files in repository, run:
 pre-commit run --all-files
 ```

-
 ## Build

 ```shell
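The Lint hunk above documents `pre-commit run --all-files`, which is what this PR applies across the whole repository. A minimal sketch of the usual local workflow, assuming pre-commit is installed from PyPI (not quoted from the repository docs):

```shell
# Typical pre-commit setup (assumed workflow, not taken verbatim from the repo):
pip install pre-commit
pre-commit install          # install the git hook so checks run on every commit
pre-commit run --all-files  # run every configured hook against the whole tree
```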
@@ -52,20 +50,25 @@ docker buildx bake -f docker-bake.hcl *...*
 ## Test

 ### Ping site

 Lightweight test that just checks if site will be available after creation.

 Frappe:

 ```shell
 ./tests/test-frappe.sh
 ```

 ERPNext:

 ```shell
 ./tests/test-erpnext.sh
 ```

 ### Integration test

 Tests frappe-bench-like commands, for example, `backup` and `restore`.

 ```shell
 ./tests/integration-test.sh
 ```
README.md — 18 changed lines

@@ -30,21 +30,21 @@ cd frappe_docker

 It takes care of the following:

-* Setting up the desired version of Frappe/ERPNext.
-* Setting up all the system requirements: eg. MariaDB, Node, Redis.
-* Configure networking for remote access and setting up LetsEncrypt.
+- Setting up the desired version of Frappe/ERPNext.
+- Setting up all the system requirements: eg. MariaDB, Node, Redis.
+- Configure networking for remote access and setting up LetsEncrypt.

 It doesn't take care of the following:

-* Cron Job to backup sites is not created by default.
-* Use `CronJob` on k8s or refer wiki for alternatives.
+- Cron Job to backup sites is not created by default.
+- Use `CronJob` on k8s or refer wiki for alternatives.

 1. Single Server Installs
    1. [Single bench](docs/single-bench.md). Easiest Install!
    2. [Multi bench](docs/multi-bench.md)
 2. Multi Server Installs
    1. [Docker Swarm](docs/docker-swarm.md)
    2. [Kubernetes](https://helm.erpnext.com)
 3. [Site Operations](docs/site-operations.md)
 4. [Environment Variables](docs/environment-variables.md)
 5. [Custom apps for production](docs/custom-apps-for-production.md)
@@ -89,7 +89,7 @@ server {
     client_body_buffer_size 16K;
     client_header_buffer_size 1k;

-    # enable gzip compresion
+    # enable gzip compression
     # based on https://mattstauffer.co/blog/enabling-gzip-on-nginx-servers-including-laravel-forge
     gzip on;
     gzip_http_version 1.1;
@@ -1,19 +1,22 @@
 #!/home/frappe/frappe-bench/env/bin/python

+import os
 import subprocess
 import sys
-import os


 if __name__ == "__main__":
-    bench_dir = os.path.join(os.sep, 'home', 'frappe', 'frappe-bench')
-    sites_dir = os.path.join(bench_dir, 'sites')
+    bench_dir = os.path.join(os.sep, "home", "frappe", "frappe-bench")
+    sites_dir = os.path.join(bench_dir, "sites")
     bench_helper = os.path.join(
-        bench_dir, 'apps', 'frappe',
-        'frappe', 'utils', 'bench_helper.py',
+        bench_dir,
+        "apps",
+        "frappe",
+        "frappe",
+        "utils",
+        "bench_helper.py",
     )
     cwd = os.getcwd()
     os.chdir(sites_dir)
     subprocess.check_call(
-        [sys.executable, bench_helper, 'frappe'] + sys.argv[1:],
+        [sys.executable, bench_helper, "frappe"] + sys.argv[1:],
     )
@@ -1,14 +1,14 @@
 import os
-import semantic_version
-import git

+import git
+import semantic_version
 from migrate import migrate_sites
 from utils import (
-    save_version_file,
     get_apps,
+    get_config,
     get_container_versions,
     get_version_file,
-    get_config
+    save_version_file,
 )


@@ -30,12 +30,12 @@ def main():
        version_file_hash = None
        container_hash = None

-       repo = git.Repo(os.path.join('..', 'apps', app))
+       repo = git.Repo(os.path.join("..", "apps", app))
        branch = repo.active_branch.name

-       if branch == 'develop':
-           version_file_hash = version_file.get(app+'_git_hash')
-           container_hash = container_versions.get(app+'_git_hash')
+       if branch == "develop":
+           version_file_hash = version_file.get(app + "_git_hash")
+           container_hash = container_versions.get(app + "_git_hash")
            if container_hash and version_file_hash:
                if container_hash != version_file_hash:
                    is_ready = True

@@ -54,7 +54,7 @@ def main():

     config = get_config()

-    if is_ready and config.get('maintenance_mode') != 1:
+    if is_ready and config.get("maintenance_mode") != 1:
         migrate_sites(maintenance_mode=True)
         version_file = container_versions
         save_version_file(version_file)
@@ -1,7 +1,8 @@
 import os

 import frappe
-from frappe.utils.backups import scheduled_backup
 from frappe.utils import cint, get_sites, now
+from frappe.utils.backups import scheduled_backup


 def backup(sites, with_files=False):

@@ -13,12 +14,17 @@ def backup(sites, with_files=False):
             backup_path_db=None,
             backup_path_files=None,
             backup_path_private_files=None,
-            force=True
+            force=True,
         )
         print("database backup taken -", odb.backup_path_db, "- on", now())
         if with_files:
             print("files backup taken -", odb.backup_path_files, "- on", now())
-            print("private files backup taken -", odb.backup_path_private_files, "- on", now())
+            print(
+                "private files backup taken -",
+                odb.backup_path_private_files,
+                "- on",
+                now(),
+            )
         frappe.destroy()

@@ -1,15 +1,16 @@
 import socket
 import time

+from constants import (
+    DB_HOST_KEY,
+    DB_PORT,
+    DB_PORT_KEY,
+    REDIS_CACHE_KEY,
+    REDIS_QUEUE_KEY,
+    REDIS_SOCKETIO_KEY,
+)
 from six.moves.urllib.parse import urlparse
 from utils import get_config
-from constants import (
-    REDIS_QUEUE_KEY,
-    REDIS_CACHE_KEY,
-    REDIS_SOCKETIO_KEY,
-    DB_HOST_KEY,
-    DB_PORT_KEY,
-    DB_PORT
-)


 def is_open(ip, port, timeout=30):

@@ -29,7 +30,7 @@ def check_host(ip, port, retry=10, delay=3, print_attempt=True):
     ipup = False
     for i in range(retry):
         if print_attempt:
-            print("Attempt {i} to connect to {ip}:{port}".format(ip=ip, port=port, i=i+1))
+            print(f"Attempt {i+1} to connect to {ip}:{port}")
         if is_open(ip, port):
             ipup = True
             break

@@ -40,30 +41,26 @@ def check_host(ip, port, retry=10, delay=3, print_attempt=True):

 # Check service
 def check_service(
-    retry=10,
-    delay=3,
-    print_attempt=True,
-    service_name=None,
-    service_port=None):
-
+    retry=10, delay=3, print_attempt=True, service_name=None, service_port=None
+):
     config = get_config()
     if not service_name:
-        service_name = config.get(DB_HOST_KEY, 'mariadb')
+        service_name = config.get(DB_HOST_KEY, "mariadb")
     if not service_port:
         service_port = config.get(DB_PORT_KEY, DB_PORT)

     is_db_connected = False
     is_db_connected = check_host(
-        service_name,
-        service_port,
-        retry,
-        delay,
-        print_attempt)
+        service_name, service_port, retry, delay, print_attempt
+    )
     if not is_db_connected:
-        print("Connection to {service_name}:{service_port} timed out".format(
-            service_name=service_name,
-            service_port=service_port,
-        ))
+        print(
+            "Connection to {service_name}:{service_port} timed out".format(
+                service_name=service_name,
+                service_port=service_port,
+            )
+        )
         exit(1)


@@ -71,14 +68,13 @@ def check_service(
 def check_redis_queue(retry=10, delay=3, print_attempt=True):
     check_redis_queue = False
     config = get_config()
-    redis_queue_url = urlparse(config.get(REDIS_QUEUE_KEY, "redis://redis-queue:6379")).netloc
+    redis_queue_url = urlparse(
+        config.get(REDIS_QUEUE_KEY, "redis://redis-queue:6379")
+    ).netloc
     redis_queue, redis_queue_port = redis_queue_url.split(":")
     check_redis_queue = check_host(
-        redis_queue,
-        redis_queue_port,
-        retry,
-        delay,
-        print_attempt)
+        redis_queue, redis_queue_port, retry, delay, print_attempt
+    )
     if not check_redis_queue:
         print("Connection to redis queue timed out")
         exit(1)

@@ -88,14 +84,13 @@ def check_redis_queue(retry=10, delay=3, print_attempt=True):
 def check_redis_cache(retry=10, delay=3, print_attempt=True):
     check_redis_cache = False
     config = get_config()
-    redis_cache_url = urlparse(config.get(REDIS_CACHE_KEY, "redis://redis-cache:6379")).netloc
+    redis_cache_url = urlparse(
+        config.get(REDIS_CACHE_KEY, "redis://redis-cache:6379")
+    ).netloc
     redis_cache, redis_cache_port = redis_cache_url.split(":")
     check_redis_cache = check_host(
-        redis_cache,
-        redis_cache_port,
-        retry,
-        delay,
-        print_attempt)
+        redis_cache, redis_cache_port, retry, delay, print_attempt
+    )
     if not check_redis_cache:
         print("Connection to redis cache timed out")
         exit(1)

@@ -105,14 +100,13 @@ def check_redis_cache(retry=10, delay=3, print_attempt=True):
 def check_redis_socketio(retry=10, delay=3, print_attempt=True):
     check_redis_socketio = False
     config = get_config()
-    redis_socketio_url = urlparse(config.get(REDIS_SOCKETIO_KEY, "redis://redis-socketio:6379")).netloc
+    redis_socketio_url = urlparse(
+        config.get(REDIS_SOCKETIO_KEY, "redis://redis-socketio:6379")
+    ).netloc
     redis_socketio, redis_socketio_port = redis_socketio_url.split(":")
     check_redis_socketio = check_host(
-        redis_socketio,
-        redis_socketio_port,
-        retry,
-        delay,
-        print_attempt)
+        redis_socketio, redis_socketio_port, retry, delay, print_attempt
+    )
     if not check_redis_socketio:
         print("Connection to redis socketio timed out")
         exit(1)

@@ -123,7 +117,7 @@ def main():
     check_redis_queue()
     check_redis_cache()
     check_redis_socketio()
-    print('Connections OK')
+    print("Connections OK")


 if __name__ == "__main__":
@@ -1,13 +1,13 @@
-REDIS_QUEUE_KEY = 'redis_queue'
-REDIS_CACHE_KEY = 'redis_cache'
-REDIS_SOCKETIO_KEY = 'redis_socketio'
-DB_HOST_KEY = 'db_host'
-DB_PORT_KEY = 'db_port'
+REDIS_QUEUE_KEY = "redis_queue"
+REDIS_CACHE_KEY = "redis_cache"
+REDIS_SOCKETIO_KEY = "redis_socketio"
+DB_HOST_KEY = "db_host"
+DB_PORT_KEY = "db_port"
 DB_PORT = 3306
-APP_VERSIONS_JSON_FILE = 'app_versions.json'
-APPS_TXT_FILE = 'apps.txt'
-COMMON_SITE_CONFIG_FILE = 'common_site_config.json'
+APP_VERSIONS_JSON_FILE = "app_versions.json"
+APPS_TXT_FILE = "apps.txt"
+COMMON_SITE_CONFIG_FILE = "common_site_config.json"
 DATE_FORMAT = "%Y%m%d_%H%M%S"
-RDS_DB = 'rds_db'
+RDS_DB = "rds_db"
 RDS_PRIVILEGES = "SELECT, INSERT, UPDATE, DELETE, CREATE, DROP, INDEX, ALTER, CREATE TEMPORARY TABLES, CREATE VIEW, EVENT, TRIGGER, SHOW VIEW, CREATE ROUTINE, ALTER ROUTINE, EXECUTE, LOCK TABLES"
-ARCHIVE_SITES_PATH = '/home/frappe/frappe-bench/sites/archive_sites'
+ARCHIVE_SITES_PATH = "/home/frappe/frappe-bench/sites/archive_sites"
@@ -1,20 +1,20 @@
 import argparse

 from check_connection import (
-    check_service,
     check_redis_cache,
     check_redis_queue,
     check_redis_socketio,
+    check_service,
 )


 def parse_args():
     parser = argparse.ArgumentParser()
     parser.add_argument(
-        '-p',
-        '--ping-service',
-        dest='ping_services',
-        action='append',
+        "-p",
+        "--ping-service",
+        dest="ping_services",
+        action="append",
         type=str,
         help='list of services to ping, e.g. doctor -p "postgres:5432" --ping-service "mariadb:3306"',
     )

@@ -33,15 +33,15 @@ def main():
     check_redis_socketio(retry=1, delay=0, print_attempt=False)
     print("Redis SocketIO Connected")

-    if(args.ping_services):
+    if args.ping_services:
         for service in args.ping_services:
             service_name = None
             service_port = None

             try:
-                service_name, service_port = service.split(':')
+                service_name, service_port = service.split(":")
             except ValueError:
-                print('Service should be in format host:port, e.g postgres:5432')
+                print("Service should be in format host:port, e.g postgres:5432")
                 exit(1)

             check_service(

@@ -51,7 +51,7 @@ def main():
                 service_name=service_name,
                 service_port=service_port,
             )
-            print("{0}:{1} Connected".format(service_name, service_port))
+            print(f"{service_name}:{service_port} Connected")

     print("Health check successful")
     exit(0)
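For context, the argparse help in the doctor hunks above shows how extra services are passed to the health check; per the signatures above, `check_host` retries up to 10 times with a 3-second delay by default. A usage sketch based only on that help string (the service names are examples):

```shell
# Example invocation taken from the --ping-service help text above:
doctor -p "postgres:5432" --ping-service "mariadb:3306"
```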
@@ -1,2 +1,3 @@
 import gevent.monkey
+
 gevent.monkey.patch_all()
@@ -1,6 +1,6 @@
 import os
-import frappe

+import frappe
 from frappe.utils import cint, get_sites
 from utils import get_config, save_config

@@ -27,11 +27,12 @@ def migrate_sites(maintenance_mode=False):
         set_maintenance_mode(True)

     for site in sites:
-        print('Migrating', site)
+        print("Migrating", site)
         frappe.init(site=site)
         frappe.connect()
         try:
             from frappe.migrate import migrate
+
             migrate()
         finally:
             frappe.destroy()
@@ -1,15 +1,10 @@
 import os

 import frappe
 import semantic_version

-from frappe.installer import update_site_config
 from constants import COMMON_SITE_CONFIG_FILE, RDS_DB, RDS_PRIVILEGES
-from utils import (
-    run_command,
-    get_config,
-    get_site_config,
-    get_password,
-)
+from frappe.installer import update_site_config
+from utils import get_config, get_password, get_site_config, run_command

 # try to import _new_site from frappe, which could possibly
 # exist in either commands.py or installer.py, and so we need

@@ -24,33 +19,43 @@ except ImportError:

 def main():
     config = get_config()
-    db_type = 'mariadb'
-    db_port = config.get('db_port', 3306)
-    db_host = config.get('db_host')
-    site_name = os.environ.get("SITE_NAME", 'site1.localhost')
-    db_root_username = os.environ.get("DB_ROOT_USER", 'root')
-    mariadb_root_password = get_password("MYSQL_ROOT_PASSWORD", 'admin')
+    db_type = "mariadb"
+    db_port = config.get("db_port", 3306)
+    db_host = config.get("db_host")
+    site_name = os.environ.get("SITE_NAME", "site1.localhost")
+    db_root_username = os.environ.get("DB_ROOT_USER", "root")
+    mariadb_root_password = get_password("MYSQL_ROOT_PASSWORD", "admin")
     postgres_root_password = get_password("POSTGRES_PASSWORD")
     db_root_password = mariadb_root_password

     if postgres_root_password:
-        db_type = 'postgres'
+        db_type = "postgres"
         db_host = os.environ.get("POSTGRES_HOST")
         db_port = 5432
         db_root_password = postgres_root_password
         if not db_host:
-            db_host = config.get('db_host')
-            print('Environment variable POSTGRES_HOST not found.')
-            print('Using db_host from common_site_config.json')
+            db_host = config.get("db_host")
+            print("Environment variable POSTGRES_HOST not found.")
+            print("Using db_host from common_site_config.json")

     sites_path = os.getcwd()
     common_site_config_path = os.path.join(sites_path, COMMON_SITE_CONFIG_FILE)
-    update_site_config("root_login", db_root_username, validate = False, site_config_path = common_site_config_path)
-    update_site_config("root_password", db_root_password, validate = False, site_config_path = common_site_config_path)
+    update_site_config(
+        "root_login",
+        db_root_username,
+        validate=False,
+        site_config_path=common_site_config_path,
+    )
+    update_site_config(
+        "root_password",
+        db_root_password,
+        validate=False,
+        site_config_path=common_site_config_path,
+    )

     force = True if os.environ.get("FORCE", None) else False
     install_apps = os.environ.get("INSTALL_APPS", None)
-    install_apps = install_apps.split(',') if install_apps else []
+    install_apps = install_apps.split(",") if install_apps else []
     frappe.init(site_name, new_site=True)

     if semantic_version.Version(frappe.__version__).major > 11:

@@ -59,7 +64,7 @@ def main():
             site_name,
             mariadb_root_username=db_root_username,
             mariadb_root_password=db_root_password,
-            admin_password=get_password("ADMIN_PASSWORD", 'admin'),
+            admin_password=get_password("ADMIN_PASSWORD", "admin"),
             verbose=True,
             install_apps=install_apps,
             source_sql=None,

@@ -75,7 +80,7 @@ def main():
             site_name,
             mariadb_root_username=db_root_username,
             mariadb_root_password=db_root_password,
-            admin_password=get_password("ADMIN_PASSWORD", 'admin'),
+            admin_password=get_password("ADMIN_PASSWORD", "admin"),
             verbose=True,
             install_apps=install_apps,
             source_sql=None,

@@ -83,16 +88,23 @@ def main():
             reinstall=False,
         )


     if db_type == "mariadb":
         site_config = get_site_config(site_name)
-        db_name = site_config.get('db_name')
-        db_password = site_config.get('db_password')
+        db_name = site_config.get("db_name")
+        db_password = site_config.get("db_password")

-        mysql_command = ["mysql", f"-h{db_host}", f"-u{db_root_username}", f"-p{mariadb_root_password}", "-e"]
+        mysql_command = [
+            "mysql",
+            f"-h{db_host}",
+            f"-u{db_root_username}",
+            f"-p{mariadb_root_password}",
+            "-e",
+        ]

         # Drop User if exists
-        command = mysql_command + [f"DROP USER IF EXISTS '{db_name}'; FLUSH PRIVILEGES;"]
+        command = mysql_command + [
+            f"DROP USER IF EXISTS '{db_name}'; FLUSH PRIVILEGES;"
+        ]
         run_command(command)

         # Grant permission to database and set password

@@ -102,10 +114,12 @@ def main():
         if config.get(RDS_DB) or site_config.get(RDS_DB):
             grant_privileges = RDS_PRIVILEGES

-        command = mysql_command + [f"\
+        command = mysql_command + [
+            f"\
 CREATE USER IF NOT EXISTS '{db_name}'@'%' IDENTIFIED BY '{db_password}'; \
 GRANT {grant_privileges} ON `{db_name}`.* TO '{db_name}'@'%'; \
-FLUSH PRIVILEGES;"]
+FLUSH PRIVILEGES;"
+        ]
         run_command(command)

     if frappe.redis_server:
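As a reference for the site-creation hunks above: the script reads its inputs from environment variables that appear in the diff. A hypothetical environment sketch (values are placeholders; the names and the noted fallbacks are the ones visible above):

```shell
# Placeholder values; variable names are the ones read by the new-site script above.
export SITE_NAME=site1.localhost    # falls back to site1.localhost
export DB_ROOT_USER=root            # falls back to root
export MYSQL_ROOT_PASSWORD=admin    # MariaDB root password, default "admin"
export POSTGRES_PASSWORD=changeme   # if set, switches db_type to postgres
export POSTGRES_HOST=postgres       # assumed host name
export ADMIN_PASSWORD=changeme      # site Administrator password, default "admin"
export INSTALL_APPS=erpnext         # comma-separated app list (placeholder value)
export FORCE=1                      # any non-empty value forces site creation
```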
@@ -1,16 +1,12 @@
+import datetime
 import os
 import time
-import boto3
-
-import datetime
 from glob import glob
-from frappe.utils import get_sites
+
+import boto3
 from constants import DATE_FORMAT
-from utils import (
-    get_s3_config,
-    upload_file_to_s3,
-    check_s3_environment_variables,
-)
+from frappe.utils import get_sites
+from utils import check_s3_environment_variables, get_s3_config, upload_file_to_s3


 def get_file_ext():

@@ -18,7 +14,7 @@ def get_file_ext():
         "database": "-database.sql.gz",
         "private_files": "-private-files.tar",
         "public_files": "-files.tar",
-        "site_config": "-site_config_backup.json"
+        "site_config": "-site_config_backup.json",
     }

@@ -31,19 +27,26 @@ def get_backup_details(sitename):

     if os.path.exists(site_backup_path):
         for filetype, ext in file_ext.items():
-            site_slug = sitename.replace('.', '_')
-            pattern = site_backup_path + '*-' + site_slug + ext
+            site_slug = sitename.replace(".", "_")
+            pattern = site_backup_path + "*-" + site_slug + ext
             backup_files = list(filter(os.path.isfile, glob(pattern)))

             if len(backup_files) > 0:
-                backup_files.sort(key=lambda file: os.stat(os.path.join(site_backup_path, file)).st_ctime)
-                backup_date = datetime.datetime.strptime(time.ctime(os.path.getmtime(backup_files[0])), "%a %b %d %H:%M:%S %Y")
+                backup_files.sort(
+                    key=lambda file: os.stat(
+                        os.path.join(site_backup_path, file)
+                    ).st_ctime
+                )
+                backup_date = datetime.datetime.strptime(
+                    time.ctime(os.path.getmtime(backup_files[0])),
+                    "%a %b %d %H:%M:%S %Y",
+                )
                 backup_details[filetype] = {
                     "sitename": sitename,
                     "file_size_in_bytes": os.stat(backup_files[-1]).st_size,
                     "file_path": os.path.abspath(backup_files[-1]),
                     "filename": os.path.basename(backup_files[-1]),
-                    "backup_date": backup_date.date().strftime("%Y-%m-%d %H:%M:%S")
+                    "backup_date": backup_date.date().strftime("%Y-%m-%d %H:%M:%S"),
                 }

     return backup_details

@@ -54,31 +57,34 @@ def delete_old_backups(limit, bucket, site_name):
     all_backup_dates = list()
     backup_limit = int(limit)
     check_s3_environment_variables()
-    bucket_dir = os.environ.get('BUCKET_DIR')
+    bucket_dir = os.environ.get("BUCKET_DIR")
     oldest_backup_date = None

     s3 = boto3.resource(
-        's3',
-        region_name=os.environ.get('REGION'),
-        aws_access_key_id=os.environ.get('ACCESS_KEY_ID'),
-        aws_secret_access_key=os.environ.get('SECRET_ACCESS_KEY'),
-        endpoint_url=os.environ.get('ENDPOINT_URL')
+        "s3",
+        region_name=os.environ.get("REGION"),
+        aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
+        aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
+        endpoint_url=os.environ.get("ENDPOINT_URL"),
     )

     bucket = s3.Bucket(bucket)
-    objects = bucket.meta.client.list_objects_v2(
-        Bucket=bucket.name,
-        Delimiter='/')
+    objects = bucket.meta.client.list_objects_v2(Bucket=bucket.name, Delimiter="/")

     if objects:
-        for obj in objects.get('CommonPrefixes'):
-            if obj.get('Prefix') == bucket_dir + '/':
-                for backup_obj in bucket.objects.filter(Prefix=obj.get('Prefix')):
+        for obj in objects.get("CommonPrefixes"):
+            if obj.get("Prefix") == bucket_dir + "/":
+                for backup_obj in bucket.objects.filter(Prefix=obj.get("Prefix")):
                     if backup_obj.get()["ContentType"] == "application/x-directory":
                         continue
                     try:
                         # backup_obj.key is bucket_dir/site/date_time/backupfile.extension
-                        bucket_dir, site_slug, date_time, backupfile = backup_obj.key.split('/')
+                        (
+                            bucket_dir,
+                            site_slug,
+                            date_time,
+                            backupfile,
+                        ) = backup_obj.key.split("/")
                         date_time_object = datetime.datetime.strptime(
                             date_time, DATE_FORMAT
                         )

@@ -98,7 +104,7 @@ def delete_old_backups(limit, bucket, site_name):
     for backup in all_backups:
         try:
             # backup is bucket_dir/site/date_time/backupfile.extension
-            backup_dir, site_slug, backup_dt_string, filename = backup.split('/')
+            backup_dir, site_slug, backup_dt_string, filename = backup.split("/")
             backup_datetime = datetime.datetime.strptime(
                 backup_dt_string, DATE_FORMAT
             )

@@ -113,7 +119,7 @@ def delete_old_backups(limit, bucket, site_name):
     for obj in bucket.objects.filter(Prefix=oldest_backup):
         # delete all keys that are inside the oldest_backup
         if bucket_dir in obj.key:
-            print('Deleteing ' + obj.key)
+            print("Deleting " + obj.key)
             s3.Object(bucket.name, obj.key).delete()

@@ -124,31 +130,52 @@ def main():

     for site in sites:
         details = get_backup_details(site)
-        db_file = details.get('database', {}).get('file_path')
-        folder = os.environ.get('BUCKET_DIR') + '/' + site + '/'
+        db_file = details.get("database", {}).get("file_path")
+        folder = os.environ.get("BUCKET_DIR") + "/" + site + "/"
         if db_file:
-            folder = os.environ.get('BUCKET_DIR') + '/' + site + '/' + os.path.basename(db_file)[:15] + '/'
+            folder = (
+                os.environ.get("BUCKET_DIR")
+                + "/"
+                + site
+                + "/"
+                + os.path.basename(db_file)[:15]
+                + "/"
+            )
             upload_file_to_s3(db_file, folder, conn, bucket)

             # Archive site_config.json
-            site_config_file = details.get('site_config', {}).get('file_path')
+            site_config_file = details.get("site_config", {}).get("file_path")
             if not site_config_file:
-                site_config_file = os.path.join(os.getcwd(), site, 'site_config.json')
+                site_config_file = os.path.join(os.getcwd(), site, "site_config.json")
             upload_file_to_s3(site_config_file, folder, conn, bucket)

-        public_files = details.get('public_files', {}).get('file_path')
+        public_files = details.get("public_files", {}).get("file_path")
         if public_files:
-            folder = os.environ.get('BUCKET_DIR') + '/' + site + '/' + os.path.basename(public_files)[:15] + '/'
+            folder = (
+                os.environ.get("BUCKET_DIR")
+                + "/"
+                + site
+                + "/"
+                + os.path.basename(public_files)[:15]
+                + "/"
+            )
             upload_file_to_s3(public_files, folder, conn, bucket)

-        private_files = details.get('private_files', {}).get('file_path')
+        private_files = details.get("private_files", {}).get("file_path")
         if private_files:
-            folder = os.environ.get('BUCKET_DIR') + '/' + site + '/' + os.path.basename(private_files)[:15] + '/'
+            folder = (
+                os.environ.get("BUCKET_DIR")
+                + "/"
+                + site
+                + "/"
+                + os.path.basename(private_files)[:15]
+                + "/"
+            )
             upload_file_to_s3(private_files, folder, conn, bucket)

-        delete_old_backups(os.environ.get('BACKUP_LIMIT', '3'), bucket, site)
+        delete_old_backups(os.environ.get("BACKUP_LIMIT", "3"), bucket, site)

-    print('push-backup complete')
+    print("push-backup complete")
     exit(0)
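Similarly, the backup and restore hunks in this commit read their S3 settings from the environment. A hypothetical sketch of the variables referenced in the diff (values are placeholders):

```shell
# Placeholder values; names are the environment variables referenced in the hunks above and below.
export BUCKET_NAME=my-backups                  # assumed bucket name
export BUCKET_DIR=backups                      # prefix inside the bucket
export REGION=us-east-1                        # assumed region
export ACCESS_KEY_ID=...
export SECRET_ACCESS_KEY=...
export ENDPOINT_URL=https://s3.amazonaws.com   # assumed endpoint
export BACKUP_LIMIT=3                          # number of backups to keep, default 3
```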
@@ -1,93 +1,88 @@
-import os
 import datetime
-import tarfile
 import hashlib
-import frappe
-import boto3
+import os
+import tarfile

-from frappe.utils import get_sites, random_string
-from frappe.installer import (
-    make_conf,
-    get_conf_params,
-    make_site_dirs,
-    update_site_config
-)
+import boto3
+import frappe
 from constants import COMMON_SITE_CONFIG_FILE, DATE_FORMAT, RDS_DB, RDS_PRIVILEGES
+from frappe.installer import (
+    get_conf_params,
+    make_conf,
+    make_site_dirs,
+    update_site_config,
+)
+from frappe.utils import get_sites, random_string
 from utils import (
-    run_command,
-    list_directories,
-    set_key_in_site_config,
-    get_site_config,
+    check_s3_environment_variables,
     get_config,
     get_password,
-    check_s3_environment_variables,
+    get_site_config,
+    list_directories,
+    run_command,
+    set_key_in_site_config,
 )


 def get_backup_dir():
-    return os.path.join(
-        os.path.expanduser('~'),
-        'backups'
-    )
+    return os.path.join(os.path.expanduser("~"), "backups")


 def decompress_db(database_file, site):
     command = ["gunzip", "-c", database_file]
     with open(database_file.replace(".gz", ""), "w") as db_file:
-        print('Extract Database GZip for site {}'.format(site))
+        print(f"Extract Database GZip for site {site}")
         run_command(command, stdout=db_file)


 def restore_database(files_base, site_config_path, site):
     # restore database
-    database_file = files_base + '-database.sql.gz'
+    database_file = files_base + "-database.sql.gz"
     decompress_db(database_file, site)
     config = get_config()

     # Set db_type if it exists in backup site_config.json
-    set_key_in_site_config('db_type', site, site_config_path)
+    set_key_in_site_config("db_type", site, site_config_path)
     # Set db_host if it exists in backup site_config.json
-    set_key_in_site_config('db_host', site, site_config_path)
+    set_key_in_site_config("db_host", site, site_config_path)
     # Set db_port if it exists in backup site_config.json
-    set_key_in_site_config('db_port', site, site_config_path)
+    set_key_in_site_config("db_port", site, site_config_path)

     # get updated site_config
     site_config = get_site_config(site)

     # if no db_type exists, default to mariadb
-    db_type = site_config.get('db_type', 'mariadb')
+    db_type = site_config.get("db_type", "mariadb")
     is_database_restored = False

-    if db_type == 'mariadb':
+    if db_type == "mariadb":
         restore_mariadb(
-            config=config,
-            site_config=site_config,
-            database_file=database_file)
+            config=config, site_config=site_config, database_file=database_file
+        )
         is_database_restored = True
-    elif db_type == 'postgres':
+    elif db_type == "postgres":
         restore_postgres(
-            config=config,
-            site_config=site_config,
-            database_file=database_file)
+            config=config, site_config=site_config, database_file=database_file
+        )
         is_database_restored = True

     if is_database_restored:
         # Set encryption_key if it exists in backup site_config.json
-        set_key_in_site_config('encryption_key', site, site_config_path)
+        set_key_in_site_config("encryption_key", site, site_config_path)


 def restore_files(files_base):
-    public_files = files_base + '-files.tar'
+    public_files = files_base + "-files.tar"
     # extract tar
     public_tar = tarfile.open(public_files)
-    print('Extracting {}'.format(public_files))
+    print(f"Extracting {public_files}")
     public_tar.extractall()


 def restore_private_files(files_base):
-    private_files = files_base + '-private-files.tar'
+    private_files = files_base + "-private-files.tar"
     private_tar = tarfile.open(private_files)
-    print('Extracting {}'.format(private_files))
+    print(f"Extracting {private_files}")
     private_tar.extractall()

@@ -96,15 +91,15 @@ def pull_backup_from_s3():

     # https://stackoverflow.com/a/54672690
     s3 = boto3.resource(
-        's3',
-        region_name=os.environ.get('REGION'),
-        aws_access_key_id=os.environ.get('ACCESS_KEY_ID'),
-        aws_secret_access_key=os.environ.get('SECRET_ACCESS_KEY'),
-        endpoint_url=os.environ.get('ENDPOINT_URL')
+        "s3",
+        region_name=os.environ.get("REGION"),
+        aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
+        aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
+        endpoint_url=os.environ.get("ENDPOINT_URL"),
     )

-    bucket_dir = os.environ.get('BUCKET_DIR')
-    bucket_name = os.environ.get('BUCKET_NAME')
+    bucket_dir = os.environ.get("BUCKET_DIR")
+    bucket_name = os.environ.get("BUCKET_NAME")
     bucket = s3.Bucket(bucket_name)

     # Change directory to /home/frappe/backups

@@ -118,10 +113,10 @@ def pull_backup_from_s3():
     for obj in bucket.objects.filter(Prefix=bucket_dir):
         if obj.get()["ContentType"] == "application/x-directory":
             continue
-        backup_file = obj.key.replace(os.path.join(bucket_dir, ''), '')
+        backup_file = obj.key.replace(os.path.join(bucket_dir, ""), "")
         backup_files.append(backup_file)
-        site_name, timestamp, backup_type = backup_file.split('/')
-        site_timestamp = site_name + '/' + timestamp
+        site_name, timestamp, backup_type = backup_file.split("/")
+        site_timestamp = site_name + "/" + timestamp
         sites.add(site_name)
         site_timestamps.add(site_timestamp)

@@ -129,13 +124,11 @@ def pull_backup_from_s3():
     for site in sites:
         backup_timestamps = []
         for site_timestamp in site_timestamps:
-            site_name, timestamp = site_timestamp.split('/')
+            site_name, timestamp = site_timestamp.split("/")
             if site == site_name:
-                timestamp_datetime = datetime.datetime.strptime(
-                    timestamp, DATE_FORMAT
-                )
+                timestamp_datetime = datetime.datetime.strptime(timestamp, DATE_FORMAT)
                 backup_timestamps.append(timestamp)
-        download_backups.append(site + '/' + max(backup_timestamps))
+        download_backups.append(site + "/" + max(backup_timestamps))

     # Only download latest backups
     for backup_file in backup_files:

@@ -143,21 +136,21 @@ def pull_backup_from_s3():
             if backup in backup_file:
                 if not os.path.exists(os.path.dirname(backup_file)):
                     os.makedirs(os.path.dirname(backup_file))
-                print('Downloading {}'.format(backup_file))
-                bucket.download_file(bucket_dir + '/' + backup_file, backup_file)
+                print(f"Downloading {backup_file}")
+                bucket.download_file(bucket_dir + "/" + backup_file, backup_file)

-    os.chdir(os.path.join(os.path.expanduser('~'), 'frappe-bench', 'sites'))
+    os.chdir(os.path.join(os.path.expanduser("~"), "frappe-bench", "sites"))


 def restore_postgres(config, site_config, database_file):
     # common config
     common_site_config_path = os.path.join(os.getcwd(), COMMON_SITE_CONFIG_FILE)

-    db_root_user = config.get('root_login')
+    db_root_user = config.get("root_login")
     if not db_root_user:
-        postgres_user = os.environ.get('DB_ROOT_USER')
+        postgres_user = os.environ.get("DB_ROOT_USER")
         if not postgres_user:
-            print('Variable DB_ROOT_USER not set')
+            print("Variable DB_ROOT_USER not set")
             exit(1)

         db_root_user = postgres_user

@@ -165,13 +158,14 @@ def restore_postgres(config, site_config, database_file):
         "root_login",
         db_root_user,
         validate=False,
-        site_config_path=common_site_config_path)
+        site_config_path=common_site_config_path,
+    )

-    db_root_password = config.get('root_password')
+    db_root_password = config.get("root_password")
     if not db_root_password:
-        root_password = get_password('POSTGRES_PASSWORD')
+        root_password = get_password("POSTGRES_PASSWORD")
         if not root_password:
-            print('Variable POSTGRES_PASSWORD not set')
+            print("Variable POSTGRES_PASSWORD not set")
             exit(1)

         db_root_password = root_password

@@ -179,53 +173,72 @@ def restore_postgres(config, site_config, database_file):
         "root_password",
         db_root_password,
         validate=False,
-        site_config_path=common_site_config_path)
+        site_config_path=common_site_config_path,
+    )

     # site config
-    db_host = site_config.get('db_host')
-    db_port = site_config.get('db_port', 5432)
-    db_name = site_config.get('db_name')
-    db_password = site_config.get('db_password')
+    db_host = site_config.get("db_host")
+    db_port = site_config.get("db_port", 5432)
+    db_name = site_config.get("db_name")
+    db_password = site_config.get("db_password")

     psql_command = ["psql"]
     psql_uri = f"postgres://{db_root_user}:{db_root_password}@{db_host}:{db_port}"

-    print('Restoring PostgreSQL')
-    run_command(psql_command + [psql_uri, "-c", f"DROP DATABASE IF EXISTS \"{db_name}\""])
+    print("Restoring PostgreSQL")
+    run_command(psql_command + [psql_uri, "-c", f'DROP DATABASE IF EXISTS "{db_name}"'])
     run_command(psql_command + [psql_uri, "-c", f"DROP USER IF EXISTS {db_name}"])
-    run_command(psql_command + [psql_uri, "-c", f"CREATE DATABASE \"{db_name}\""])
-    run_command(psql_command + [psql_uri, "-c", f"CREATE user {db_name} password '{db_password}'"])
-    run_command(psql_command + [psql_uri, "-c", f"GRANT ALL PRIVILEGES ON DATABASE \"{db_name}\" TO {db_name}"])
-    with open(database_file.replace('.gz', ''), 'r') as db_file:
+    run_command(psql_command + [psql_uri, "-c", f'CREATE DATABASE "{db_name}"'])
+    run_command(
+        psql_command
+        + [psql_uri, "-c", f"CREATE user {db_name} password '{db_password}'"]
+    )
+    run_command(
+        psql_command
+        + [psql_uri, "-c", f'GRANT ALL PRIVILEGES ON DATABASE "{db_name}" TO {db_name}']
+    )
+    with open(database_file.replace(".gz", "")) as db_file:
         run_command(psql_command + [f"{psql_uri}/{db_name}", "<"], stdin=db_file)


 def restore_mariadb(config, site_config, database_file):
-    db_root_password = get_password('MYSQL_ROOT_PASSWORD')
+    db_root_password = get_password("MYSQL_ROOT_PASSWORD")
     if not db_root_password:
-        print('Variable MYSQL_ROOT_PASSWORD not set')
+        print("Variable MYSQL_ROOT_PASSWORD not set")
         exit(1)

-    db_root_user = os.environ.get("DB_ROOT_USER", 'root')
+    db_root_user = os.environ.get("DB_ROOT_USER", "root")

-    db_host = site_config.get('db_host', config.get('db_host'))
-    db_port = site_config.get('db_port', config.get('db_port', 3306))
-    db_name = site_config.get('db_name')
-    db_password = site_config.get('db_password')
+    db_host = site_config.get("db_host", config.get("db_host"))
+    db_port = site_config.get("db_port", config.get("db_port", 3306))
+    db_name = site_config.get("db_name")
+    db_password = site_config.get("db_password")

     # mysql command prefix
-    mysql_command = ["mysql", f"-u{db_root_user}", f"-h{db_host}", f"-p{db_root_password}", f"-P{db_port}"]
+    mysql_command = [
+        "mysql",
+        f"-u{db_root_user}",
+        f"-h{db_host}",
+        f"-p{db_root_password}",
+        f"-P{db_port}",
+    ]

     # drop db if exists for clean restore
     drop_database = mysql_command + ["-e", f"DROP DATABASE IF EXISTS `{db_name}`;"]
     run_command(drop_database)

     # create db
-    create_database = mysql_command + ["-e", f"CREATE DATABASE IF NOT EXISTS `{db_name}`;"]
+    create_database = mysql_command + [
+        "-e",
+        f"CREATE DATABASE IF NOT EXISTS `{db_name}`;",
+    ]
     run_command(create_database)

     # create user
-    create_user = mysql_command + ["-e", f"CREATE USER IF NOT EXISTS '{db_name}'@'%' IDENTIFIED BY '{db_password}'; FLUSH PRIVILEGES;"]
+    create_user = mysql_command + [
+        "-e",
+        f"CREATE USER IF NOT EXISTS '{db_name}'@'%' IDENTIFIED BY '{db_password}'; FLUSH PRIVILEGES;",
+    ]
     run_command(create_user)

     # grant db privileges to user

@@ -236,11 +249,14 @@ def restore_mariadb(config, site_config, database_file):
     if config.get(RDS_DB) or site_config.get(RDS_DB):
         grant_privileges = RDS_PRIVILEGES

-    grant_privileges_command = mysql_command + ["-e", f"GRANT {grant_privileges} ON `{db_name}`.* TO '{db_name}'@'%' IDENTIFIED BY '{db_password}'; FLUSH PRIVILEGES;"]
+    grant_privileges_command = mysql_command + [
+        "-e",
+        f"GRANT {grant_privileges} ON `{db_name}`.* TO '{db_name}'@'%' IDENTIFIED BY '{db_password}'; FLUSH PRIVILEGES;",
+    ]
     run_command(grant_privileges_command)

-    print('Restoring MariaDB')
-    with open(database_file.replace('.gz', ''), 'r') as db_file:
+    print("Restoring MariaDB")
+    with open(database_file.replace(".gz", "")) as db_file:
         run_command(mysql_command + [f"{db_name}"], stdin=db_file)

@@ -251,35 +267,38 @@ def main():
     pull_backup_from_s3()

     for site in list_directories(backup_dir):
-        site_slug = site.replace('.', '_')
-        backups = [datetime.datetime.strptime(backup, DATE_FORMAT) for backup in list_directories(os.path.join(backup_dir, site))]
+        site_slug = site.replace(".", "_")
+        backups = [
+            datetime.datetime.strptime(backup, DATE_FORMAT)
+            for backup in list_directories(os.path.join(backup_dir, site))
+        ]
         latest_backup = max(backups).strftime(DATE_FORMAT)
-        files_base = os.path.join(backup_dir, site, latest_backup, '')
+        files_base = os.path.join(backup_dir, site, latest_backup, "")
|
||||||
files_base += latest_backup + '-' + site_slug
|
files_base += latest_backup + "-" + site_slug
|
||||||
site_config_path = files_base + '-site_config_backup.json'
|
site_config_path = files_base + "-site_config_backup.json"
|
||||||
if not os.path.exists(site_config_path):
|
if not os.path.exists(site_config_path):
|
||||||
site_config_path = os.path.join(backup_dir, site, 'site_config.json')
|
site_config_path = os.path.join(backup_dir, site, "site_config.json")
|
||||||
if site in get_sites():
|
if site in get_sites():
|
||||||
print('Overwrite site {}'.format(site))
|
print(f"Overwrite site {site}")
|
||||||
restore_database(files_base, site_config_path, site)
|
restore_database(files_base, site_config_path, site)
|
||||||
restore_private_files(files_base)
|
restore_private_files(files_base)
|
||||||
restore_files(files_base)
|
restore_files(files_base)
|
||||||
else:
|
else:
|
||||||
site_config = get_conf_params(
|
site_config = get_conf_params(
|
||||||
db_name='_' + hashlib.sha1(site.encode()).hexdigest()[:16],
|
db_name="_" + hashlib.sha1(site.encode()).hexdigest()[:16],
|
||||||
db_password=random_string(16)
|
db_password=random_string(16),
|
||||||
)
|
)
|
||||||
|
|
||||||
frappe.local.site = site
|
frappe.local.site = site
|
||||||
frappe.local.sites_path = os.getcwd()
|
frappe.local.sites_path = os.getcwd()
|
||||||
frappe.local.site_path = os.getcwd() + '/' + site
|
frappe.local.site_path = os.getcwd() + "/" + site
|
||||||
make_conf(
|
make_conf(
|
||||||
db_name=site_config.get('db_name'),
|
db_name=site_config.get("db_name"),
|
||||||
db_password=site_config.get('db_password'),
|
db_password=site_config.get("db_password"),
|
||||||
)
|
)
|
||||||
make_site_dirs()
|
make_site_dirs()
|
||||||
|
|
||||||
print('Create site {}'.format(site))
|
print(f"Create site {site}")
|
||||||
restore_database(files_base, site_config_path, site)
|
restore_database(files_base, site_config_path, site)
|
||||||
restore_private_files(files_base)
|
restore_private_files(files_base)
|
||||||
restore_files(files_base)
|
restore_files(files_base)
|
||||||
|
@ -1,15 +1,12 @@
|
|||||||
import json
|
import json
|
||||||
import os
|
import os
|
||||||
import subprocess
|
import subprocess
|
||||||
|
|
||||||
import boto3
|
import boto3
|
||||||
import git
|
import git
|
||||||
|
from constants import APP_VERSIONS_JSON_FILE, APPS_TXT_FILE, COMMON_SITE_CONFIG_FILE
|
||||||
from frappe.installer import update_site_config
|
from frappe.installer import update_site_config
|
||||||
from constants import (
|
|
||||||
APP_VERSIONS_JSON_FILE,
|
|
||||||
APPS_TXT_FILE,
|
|
||||||
COMMON_SITE_CONFIG_FILE
|
|
||||||
)
|
|
||||||
|
|
||||||
def run_command(command, stdout=None, stdin=None, stderr=None):
|
def run_command(command, stdout=None, stdin=None, stderr=None):
|
||||||
stdout = stdout or subprocess.PIPE
|
stdout = stdout or subprocess.PIPE
|
||||||
@ -26,7 +23,7 @@ def run_command(command, stdout=None, stdin=None, stderr=None):
|
|||||||
|
|
||||||
|
|
||||||
def save_version_file(versions):
|
def save_version_file(versions):
|
||||||
with open(APP_VERSIONS_JSON_FILE, 'w') as f:
|
with open(APP_VERSIONS_JSON_FILE, "w") as f:
|
||||||
return json.dump(versions, f, indent=1, sort_keys=True)
|
return json.dump(versions, f, indent=1, sort_keys=True)
|
||||||
|
|
||||||
|
|
||||||
@ -58,10 +55,10 @@ def get_container_versions(apps):
|
|||||||
pass
|
pass
|
||||||
|
|
||||||
try:
|
try:
|
||||||
path = os.path.join('..', 'apps', app)
|
path = os.path.join("..", "apps", app)
|
||||||
repo = git.Repo(path)
|
repo = git.Repo(path)
|
||||||
commit_hash = repo.head.object.hexsha
|
commit_hash = repo.head.object.hexsha
|
||||||
versions.update({app+'_git_hash': commit_hash})
|
versions.update({app + "_git_hash": commit_hash})
|
||||||
except Exception:
|
except Exception:
|
||||||
pass
|
pass
|
||||||
|
|
||||||
@ -94,18 +91,22 @@ def get_config():
|
|||||||
|
|
||||||
def get_site_config(site_name):
|
def get_site_config(site_name):
|
||||||
site_config = None
|
site_config = None
|
||||||
with open('{site_name}/site_config.json'.format(site_name=site_name)) as site_config_file:
|
with open(f"{site_name}/site_config.json") as site_config_file:
|
||||||
site_config = json.load(site_config_file)
|
site_config = json.load(site_config_file)
|
||||||
return site_config
|
return site_config
|
||||||
|
|
||||||
|
|
||||||
def save_config(config):
|
def save_config(config):
|
||||||
with open(COMMON_SITE_CONFIG_FILE, 'w') as f:
|
with open(COMMON_SITE_CONFIG_FILE, "w") as f:
|
||||||
return json.dump(config, f, indent=1, sort_keys=True)
|
return json.dump(config, f, indent=1, sort_keys=True)
|
||||||
|
|
||||||
|
|
||||||
def get_password(env_var, default=None):
|
def get_password(env_var, default=None):
|
||||||
return os.environ.get(env_var) or get_password_from_secret(f"{env_var}_FILE") or default
|
return (
|
||||||
|
os.environ.get(env_var)
|
||||||
|
or get_password_from_secret(f"{env_var}_FILE")
|
||||||
|
or default
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def get_password_from_secret(env_var):
|
def get_password_from_secret(env_var):
|
||||||
@ -128,14 +129,14 @@ def get_password_from_secret(env_var):
|
|||||||
|
|
||||||
def get_s3_config():
|
def get_s3_config():
|
||||||
check_s3_environment_variables()
|
check_s3_environment_variables()
|
||||||
bucket = os.environ.get('BUCKET_NAME')
|
bucket = os.environ.get("BUCKET_NAME")
|
||||||
|
|
||||||
conn = boto3.client(
|
conn = boto3.client(
|
||||||
's3',
|
"s3",
|
||||||
region_name=os.environ.get('REGION'),
|
region_name=os.environ.get("REGION"),
|
||||||
aws_access_key_id=os.environ.get('ACCESS_KEY_ID'),
|
aws_access_key_id=os.environ.get("ACCESS_KEY_ID"),
|
||||||
aws_secret_access_key=os.environ.get('SECRET_ACCESS_KEY'),
|
aws_secret_access_key=os.environ.get("SECRET_ACCESS_KEY"),
|
||||||
endpoint_url=os.environ.get('ENDPOINT_URL')
|
endpoint_url=os.environ.get("ENDPOINT_URL"),
|
||||||
)
|
)
|
||||||
|
|
||||||
return conn, bucket
|
return conn, bucket
|
||||||
@ -164,7 +165,7 @@ def list_directories(path):
|
|||||||
def get_site_config_from_path(site_config_path):
|
def get_site_config_from_path(site_config_path):
|
||||||
site_config = dict()
|
site_config = dict()
|
||||||
if os.path.exists(site_config_path):
|
if os.path.exists(site_config_path):
|
||||||
with open(site_config_path, 'r') as sc:
|
with open(site_config_path) as sc:
|
||||||
site_config = json.load(sc)
|
site_config = json.load(sc)
|
||||||
return site_config
|
return site_config
|
||||||
|
|
||||||
@ -173,32 +174,35 @@ def set_key_in_site_config(key, site, site_config_path):
|
|||||||
site_config = get_site_config_from_path(site_config_path)
|
site_config = get_site_config_from_path(site_config_path)
|
||||||
value = site_config.get(key)
|
value = site_config.get(key)
|
||||||
if value:
|
if value:
|
||||||
print('Set {key} in site config for site: {site}'.format(key=key, site=site))
|
print(f"Set {key} in site config for site: {site}")
|
||||||
update_site_config(key, value,
|
update_site_config(
|
||||||
site_config_path=os.path.join(os.getcwd(), site, "site_config.json"))
|
key,
|
||||||
|
value,
|
||||||
|
site_config_path=os.path.join(os.getcwd(), site, "site_config.json"),
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
def check_s3_environment_variables():
|
def check_s3_environment_variables():
|
||||||
if 'BUCKET_NAME' not in os.environ:
|
if "BUCKET_NAME" not in os.environ:
|
||||||
print('Variable BUCKET_NAME not set')
|
print("Variable BUCKET_NAME not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if 'ACCESS_KEY_ID' not in os.environ:
|
if "ACCESS_KEY_ID" not in os.environ:
|
||||||
print('Variable ACCESS_KEY_ID not set')
|
print("Variable ACCESS_KEY_ID not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if 'SECRET_ACCESS_KEY' not in os.environ:
|
if "SECRET_ACCESS_KEY" not in os.environ:
|
||||||
print('Variable SECRET_ACCESS_KEY not set')
|
print("Variable SECRET_ACCESS_KEY not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if 'ENDPOINT_URL' not in os.environ:
|
if "ENDPOINT_URL" not in os.environ:
|
||||||
print('Variable ENDPOINT_URL not set')
|
print("Variable ENDPOINT_URL not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if 'BUCKET_DIR' not in os.environ:
|
if "BUCKET_DIR" not in os.environ:
|
||||||
print('Variable BUCKET_DIR not set')
|
print("Variable BUCKET_DIR not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
|
||||||
if 'REGION' not in os.environ:
|
if "REGION" not in os.environ:
|
||||||
print('Variable REGION not set')
|
print("Variable REGION not set")
|
||||||
exit(1)
|
exit(1)
|
||||||
|
@ -44,14 +44,14 @@ If you want to use PostgreSQL instead, edit `.devcontainer/docker-compose.yml` a
|
|||||||
VSCode should automatically prompt you to install the required extensions, which can also be installed manually as follows:
|
VSCode should automatically prompt you to install the required extensions, which can also be installed manually as follows:
|
||||||
|
|
||||||
- Install Remote - Containers for VSCode
|
- Install Remote - Containers for VSCode
|
||||||
- through command line `code --install-extension ms-vscode-remote.remote-containers`
|
- through command line `code --install-extension ms-vscode-remote.remote-containers`
|
||||||
- clicking on the Install button in the Visual Studio Marketplace: [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
|
- clicking on the Install button in the Visual Studio Marketplace: [Remote - Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers)
|
||||||
- View: Extensions command in VSCode (Windows: Ctrl+Shift+X; macOS: Cmd+Shift+X) then search for extension `ms-vscode-remote.remote-containers`
|
- View: Extensions command in VSCode (Windows: Ctrl+Shift+X; macOS: Cmd+Shift+X) then search for extension `ms-vscode-remote.remote-containers`
|
||||||
|
|
||||||
After the extensions are installed, you can:
|
After the extensions are installed, you can:
|
||||||
|
|
||||||
- Open frappe_docker folder in VS Code.
|
- Open frappe_docker folder in VS Code.
|
||||||
- `code .`
|
- `code .`
|
||||||
- Launch the command `Execute Remote Containers : Reopen in Container` from the Command Palette (Ctrl + Shift + P). You can also click in the bottom left corner to access the remote container menu.
|
- Launch the command `Execute Remote Containers : Reopen in Container` from the Command Palette (Ctrl + Shift + P). You can also click in the bottom left corner to access the remote container menu.
|
||||||
|
|
||||||
Notes:
|
Notes:
|
||||||
@ -94,6 +94,7 @@ code Procfile
|
|||||||
```
|
```
|
||||||
|
|
||||||
Or running the following command:
|
Or running the following command:
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
sed -i '/redis/d' ./Procfile
|
sed -i '/redis/d' ./Procfile
|
||||||
```
|
```
|
||||||
@ -105,6 +106,7 @@ You can create a new site with the following command:
|
|||||||
```shell
|
```shell
|
||||||
bench new-site sitename --no-mariadb-socket
|
bench new-site sitename --no-mariadb-socket
|
||||||
```
|
```
|
||||||
|
|
||||||
sitename MUST end with .localhost for trying deployments locally.
|
sitename MUST end with .localhost for trying deployments locally.
|
||||||
|
|
||||||
for example:
|
for example:
|
||||||
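A minimal sketch, assuming a local test site name (any name ending in `.localhost` works):

```shell
bench new-site mysite.localhost --no-mariadb-socket
```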
@ -234,7 +236,7 @@ The first step is installing and updating the required software. Usually the fra
|
|||||||
/workspace/development/frappe-bench/env/bin/python -m pip install --upgrade jupyter ipykernel ipython
|
/workspace/development/frappe-bench/env/bin/python -m pip install --upgrade jupyter ipykernel ipython
|
||||||
```
|
```
|
||||||
|
|
||||||
Then, run the commmand `Python: Show Python interactive window` from the VSCode command palette.
|
Then, run the command `Python: Show Python interactive window` from the VSCode command palette.
|
||||||
|
|
||||||
Replace `mysite.localhost` with your site and run the following code in a Jupyter cell:
|
Replace `mysite.localhost` with your site and run the following code in a Jupyter cell:
|
||||||
|
|
||||||
@ -259,7 +261,6 @@ Example shows the queries to be executed for site `localhost`
|
|||||||
|
|
||||||
Open sites/localhost/site_config.json:
|
Open sites/localhost/site_config.json:
|
||||||
|
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
code sites/localhost/site_config.json
|
code sites/localhost/site_config.json
|
||||||
```
|
```
|
||||||
@ -286,6 +287,7 @@ EXIT;
|
|||||||
In case you don't use VSCode, you may start the containers manually with the following command:
|
In case you don't use VSCode, you may start the containers manually with the following command:
|
||||||
|
|
||||||
### Running the containers
|
### Running the containers
|
||||||
|
|
||||||
```shell
|
```shell
|
||||||
docker-compose -f .devcontainer/docker-compose.yml up -d
|
docker-compose -f .devcontainer/docker-compose.yml up -d
|
||||||
```
|
```
|
||||||
|
@ -1,76 +1,81 @@
|
|||||||
{
|
{
|
||||||
// Use IntelliSense to learn about possible attributes.
|
// Use IntelliSense to learn about possible attributes.
|
||||||
// Hover to view descriptions of existing attributes.
|
// Hover to view descriptions of existing attributes.
|
||||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||||
"version": "0.2.0",
|
"version": "0.2.0",
|
||||||
"configurations": [
|
"configurations": [
|
||||||
{
|
{
|
||||||
"name": "Bench Web",
|
"name": "Bench Web",
|
||||||
"type": "python",
|
"type": "python",
|
||||||
"request": "launch",
|
"request": "launch",
|
||||||
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
||||||
"args": [
|
"args": [
|
||||||
"frappe", "serve", "--port", "8000", "--noreload", "--nothreading"
|
"frappe",
|
||||||
],
|
"serve",
|
||||||
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
"--port",
|
||||||
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
"8000",
|
||||||
"env": {
|
"--noreload",
|
||||||
"DEV_SERVER": "1"
|
"--nothreading"
|
||||||
}
|
],
|
||||||
},
|
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
||||||
{
|
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
||||||
"name": "Bench Default Worker",
|
"env": {
|
||||||
"type": "python",
|
"DEV_SERVER": "1"
|
||||||
"request": "launch",
|
}
|
||||||
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
},
|
||||||
"args": [
|
{
|
||||||
"frappe", "worker", "--queue", "default"
|
"name": "Bench Default Worker",
|
||||||
],
|
"type": "python",
|
||||||
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
"request": "launch",
|
||||||
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
||||||
"env": {
|
"args": ["frappe", "worker", "--queue", "default"],
|
||||||
"DEV_SERVER": "1"
|
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
||||||
}
|
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
||||||
},
|
"env": {
|
||||||
{
|
"DEV_SERVER": "1"
|
||||||
"name": "Bench Short Worker",
|
}
|
||||||
"type": "python",
|
},
|
||||||
"request": "launch",
|
{
|
||||||
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
"name": "Bench Short Worker",
|
||||||
"args": [
|
"type": "python",
|
||||||
"frappe", "worker", "--queue", "short"
|
"request": "launch",
|
||||||
],
|
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
||||||
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
"args": ["frappe", "worker", "--queue", "short"],
|
||||||
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
||||||
"env": {
|
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
||||||
"DEV_SERVER": "1"
|
"env": {
|
||||||
}
|
"DEV_SERVER": "1"
|
||||||
},
|
}
|
||||||
{
|
},
|
||||||
"name": "Bench Long Worker",
|
{
|
||||||
"type": "python",
|
"name": "Bench Long Worker",
|
||||||
"request": "launch",
|
"type": "python",
|
||||||
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
"request": "launch",
|
||||||
"args": [
|
"program": "${workspaceFolder}/frappe-bench/apps/frappe/frappe/utils/bench_helper.py",
|
||||||
"frappe", "worker", "--queue", "long"
|
"args": ["frappe", "worker", "--queue", "long"],
|
||||||
],
|
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
||||||
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
||||||
"cwd": "${workspaceFolder}/frappe-bench/sites",
|
"env": {
|
||||||
"env": {
|
"DEV_SERVER": "1"
|
||||||
"DEV_SERVER": "1"
|
}
|
||||||
}
|
},
|
||||||
},
|
{
|
||||||
{
|
"name": "Honcho SocketIO Watch Schedule Worker",
|
||||||
"name": "Honcho SocketIO Watch Schedule Worker",
|
"type": "python",
|
||||||
"type": "python",
|
"request": "launch",
|
||||||
"request": "launch",
|
"program": "/home/frappe/.local/bin/honcho",
|
||||||
"program": "/home/frappe/.local/bin/honcho",
|
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
||||||
"pythonPath": "${workspaceFolder}/frappe-bench/env/bin/python",
|
"cwd": "${workspaceFolder}/frappe-bench",
|
||||||
"cwd": "${workspaceFolder}/frappe-bench",
|
"console": "internalConsole",
|
||||||
"console": "internalConsole",
|
"args": [
|
||||||
"args": [
|
"start",
|
||||||
"start", "socketio", "watch", "schedule", "worker_short", "worker_long", "worker_default"
|
"socketio",
|
||||||
]
|
"watch",
|
||||||
}
|
"schedule",
|
||||||
]
|
"worker_short",
|
||||||
|
"worker_long",
|
||||||
|
"worker_default"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
@ -1,3 +1,3 @@
|
|||||||
{
|
{
|
||||||
"debug.node.autoAttach": "disabled"
|
"debug.node.autoAttach": "disabled"
|
||||||
}
|
}
|
@ -42,7 +42,7 @@ version: "3.7"
|
|||||||
|
|
||||||
services:
|
services:
|
||||||
mariadb-master:
|
mariadb-master:
|
||||||
image: 'bitnami/mariadb:10.3'
|
image: "bitnami/mariadb:10.3"
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
@ -54,7 +54,7 @@ services:
|
|||||||
secrets:
|
secrets:
|
||||||
- frappe-mariadb-root-password
|
- frappe-mariadb-root-password
|
||||||
volumes:
|
volumes:
|
||||||
- 'mariadb_master_data:/bitnami/mariadb'
|
- "mariadb_master_data:/bitnami/mariadb"
|
||||||
environment:
|
environment:
|
||||||
- MARIADB_REPLICATION_MODE=master
|
- MARIADB_REPLICATION_MODE=master
|
||||||
- MARIADB_REPLICATION_USER=repl_user
|
- MARIADB_REPLICATION_USER=repl_user
|
||||||
@ -62,7 +62,7 @@ services:
|
|||||||
- MARIADB_ROOT_PASSWORD_FILE=/run/secrets/frappe-mariadb-root-password
|
- MARIADB_ROOT_PASSWORD_FILE=/run/secrets/frappe-mariadb-root-password
|
||||||
|
|
||||||
mariadb-slave:
|
mariadb-slave:
|
||||||
image: 'bitnami/mariadb:10.3'
|
image: "bitnami/mariadb:10.3"
|
||||||
deploy:
|
deploy:
|
||||||
restart_policy:
|
restart_policy:
|
||||||
condition: on-failure
|
condition: on-failure
|
||||||
@ -74,7 +74,7 @@ services:
|
|||||||
secrets:
|
secrets:
|
||||||
- frappe-mariadb-root-password
|
- frappe-mariadb-root-password
|
||||||
volumes:
|
volumes:
|
||||||
- 'mariadb_slave_data:/bitnami/mariadb'
|
- "mariadb_slave_data:/bitnami/mariadb"
|
||||||
environment:
|
environment:
|
||||||
- MARIADB_REPLICATION_MODE=slave
|
- MARIADB_REPLICATION_MODE=slave
|
||||||
- MARIADB_REPLICATION_USER=repl_user
|
- MARIADB_REPLICATION_USER=repl_user
|
||||||
@ -265,6 +265,7 @@ Use environment variables:
|
|||||||
- `FRAPPE_VERSION` variable set to the desired version of Frappe Framework, e.g. 12.7.0
|
- `FRAPPE_VERSION` variable set to the desired version of Frappe Framework, e.g. 12.7.0
|
||||||
- `MARIADB_HOST=frappe-mariadb_mariadb-master`
|
- `MARIADB_HOST=frappe-mariadb_mariadb-master`
|
||||||
- `SITES` variable is a list of sites wrapped in backticks and separated by commas
|
- `SITES` variable is a list of sites wrapped in backticks and separated by commas
|
||||||
|
|
||||||
```
|
```
|
||||||
SITES=`site1.example.com`,`site2.example.com`
|
SITES=`site1.example.com`,`site2.example.com`
|
||||||
```
|
```
|
||||||
@ -277,9 +278,9 @@ SITES=`site1.example.com`,`site2.example.com`
|
|||||||
4. Select network `frappe-network`
|
4. Select network `frappe-network`
|
||||||
5. Select Volume `frappe-bench-v13_sites-vol` and mount in container `/home/frappe/frappe-bench/sites`
|
5. Select Volume `frappe-bench-v13_sites-vol` and mount in container `/home/frappe/frappe-bench/sites`
|
||||||
6. Env variables:
|
6. Env variables:
|
||||||
- MYSQL_ROOT_PASSWORD=longsecretpassword
|
- MYSQL_ROOT_PASSWORD=longsecretpassword
|
||||||
- SITE_NAME=site1.example.com
|
- SITE_NAME=site1.example.com
|
||||||
- INSTALL_APPS=erpnext
|
- INSTALL_APPS=erpnext
|
||||||
7. Start container
|
7. Start container
|
||||||
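For reference, the steps above correspond roughly to the following `docker run` sketch; the worker image, tag, and `new` command are assumptions based on the test scripts elsewhere in this repository, so match them to your deployment:

```shell
docker run --rm \
  -e MYSQL_ROOT_PASSWORD=longsecretpassword \
  -e SITE_NAME=site1.example.com \
  -e INSTALL_APPS=erpnext \
  -v frappe-bench-v13_sites-vol:/home/frappe/frappe-bench/sites \
  --network frappe-network \
  frappe/erpnext-worker:v13 new  # image and command assumed for illustration
```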
|
|
||||||
### Migrate Sites job
|
### Migrate Sites job
|
||||||
@ -290,6 +291,5 @@ SITES=`site1.example.com`,`site2.example.com`
|
|||||||
4. Select network `frappe-network`
|
4. Select network `frappe-network`
|
||||||
5. Select Volume `frappe-bench-v13_sites-vol` and mount in container `/home/frappe/frappe-bench/sites`
|
5. Select Volume `frappe-bench-v13_sites-vol` and mount in container `/home/frappe/frappe-bench/sites`
|
||||||
6. Env variables:
|
6. Env variables:
|
||||||
- MAINTENANCE_MODE=1
|
- MAINTENANCE_MODE=1
|
||||||
7. Start container
|
7. Start container
|
||||||
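Likewise, a rough `docker run` equivalent of the migrate job, assuming the same worker image and that its entry point accepts a `migrate` command:

```shell
docker run --rm \
  -e MAINTENANCE_MODE=1 \
  -v frappe-bench-v13_sites-vol:/home/frappe/frappe-bench/sites \
  --network frappe-network \
  frappe/erpnext-worker:v13 migrate  # image and command assumed for illustration
```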
|
|
||||||
|
@ -17,17 +17,17 @@ cp env-example .env
|
|||||||
To get started, copy the existing `env-example` file to `.env`. By default, the file will contain the following variables:
|
To get started, copy the existing `env-example` file to `.env`. By default, the file will contain the following variables:
|
||||||
|
|
||||||
- `VERSION=edge`
|
- `VERSION=edge`
|
||||||
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
||||||
- `MYSQL_ROOT_PASSWORD=admin`
|
- `MYSQL_ROOT_PASSWORD=admin`
|
||||||
- Bootstraps a MariaDB container with this value set as the root password. If a managed MariaDB instance is used, there is no need to set the password here.
|
- Bootstraps a MariaDB container with this value set as the root password. If a managed MariaDB instance is used, there is no need to set the password here.
|
||||||
- `MARIADB_HOST=mariadb`
|
- `MARIADB_HOST=mariadb`
|
||||||
- Sets the hostname to `mariadb`. This is required if the database is managed by the containerized MariaDB instance.
|
- Sets the hostname to `mariadb`. This is required if the database is managed by the containerized MariaDB instance.
|
||||||
- In case of separately managed database setups, set the value to the database's hostname/IP/domain.
|
- In case of separately managed database setups, set the value to the database's hostname/IP/domain.
|
||||||
- `SITES=site1.domain.com,site2.domain.com`
|
- `SITES=site1.domain.com,site2.domain.com`
|
||||||
- List of sites that are part of the deployment "bench". Each site is separated by a comma (,).
|
- List of sites that are part of the deployment "bench". Each site is separated by a comma (,).
|
||||||
- If LetsEncrypt is being set up, make sure that the DNS for all the site's domains correctly points to the current instance.
|
- If LetsEncrypt is being set up, make sure that the DNS for all the site's domains correctly points to the current instance.
|
||||||
- `LETSENCRYPT_EMAIL=your.email@your.domain.com`
|
- `LETSENCRYPT_EMAIL=your.email@your.domain.com`
|
||||||
- Email for LetsEncrypt expiry notification. This is only required if you are setting up LetsEncrypt.
|
- Email for LetsEncrypt expiry notification. This is only required if you are setting up LetsEncrypt.
|
||||||
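Putting the defaults above together, a `.env` might look like the sketch below (values are the ones listed above; adjust the sites and email to your own):

```
VERSION=edge
MYSQL_ROOT_PASSWORD=admin
MARIADB_HOST=mariadb
SITES=`site1.domain.com`,`site2.domain.com`
LETSENCRYPT_EMAIL=your.email@your.domain.com
```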
|
|
||||||
Notes:
|
Notes:
|
||||||
|
|
||||||
@ -123,46 +123,49 @@ Notes:
|
|||||||
## Docker containers
|
## Docker containers
|
||||||
|
|
||||||
This repository contains the following docker-compose files, each one containing the described images:
|
This repository contains the following docker-compose files, each one containing the described images:
|
||||||
* docker-compose-common.yml
|
|
||||||
* redis-cache
|
|
||||||
* volume: redis-cache-vol
|
|
||||||
* redis-queue
|
|
||||||
* volume: redis-queue-vol
|
|
||||||
* redis-socketio
|
|
||||||
* volume: redis-socketio-vol
|
|
||||||
* mariadb: main database
|
|
||||||
* volume: mariadb-vol
|
|
||||||
* docker-compose-erpnext.yml
|
|
||||||
* erpnext-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
|
||||||
* volume: assets-vol
|
|
||||||
* erpnext-python: main application code
|
|
||||||
* frappe-socketio: enables realtime communication to the user interface through websockets
|
|
||||||
* frappe-worker-default: background runner
|
|
||||||
* frappe-worker-short: background runner for short-running jobs
|
|
||||||
* frappe-worker-long: background runner for long-running jobs
|
|
||||||
* frappe-schedule
|
|
||||||
|
|
||||||
* docker-compose-frappe.yml
|
- docker-compose-common.yml
|
||||||
* frappe-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
- redis-cache
|
||||||
* volume: assets-vol, sites-vol
|
- volume: redis-cache-vol
|
||||||
* erpnext-python: main application code
|
- redis-queue
|
||||||
* volume: sites-vol
|
- volume: redis-queue-vol
|
||||||
* frappe-socketio: enables realtime communication to the user interface through websockets
|
- redis-socketio
|
||||||
* volume: sites-vol
|
- volume: redis-socketio-vol
|
||||||
* frappe-worker-default: background runner
|
- mariadb: main database
|
||||||
* volume: sites-vol
|
- volume: mariadb-vol
|
||||||
* frappe-worker-short: background runner for short-running jobs
|
- docker-compose-erpnext.yml
|
||||||
* volume: sites-vol
|
|
||||||
* frappe-worker-long: background runner for long-running jobs
|
|
||||||
* volume: sites-vol
|
|
||||||
* frappe-schedule
|
|
||||||
* volume: sites-vol
|
|
||||||
|
|
||||||
* docker-compose-networks.yml: this yaml define the network to communicate with *Letsencrypt Nginx Proxy Companion*.
|
- erpnext-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
||||||
|
- volume: assets-vol
|
||||||
|
- erpnext-python: main application code
|
||||||
|
- frappe-socketio: enables realtime communication to the user interface through websockets
|
||||||
|
- frappe-worker-default: background runner
|
||||||
|
- frappe-worker-short: background runner for short-running jobs
|
||||||
|
- frappe-worker-long: background runner for long-running jobs
|
||||||
|
- frappe-schedule
|
||||||
|
|
||||||
* erpnext-publish.yml: this yml extends erpnext-nginx service to publish port 80, can only be used with docker-compose-erpnext.yml
|
- docker-compose-frappe.yml
|
||||||
|
|
||||||
* frappe-publish.yml: this yml extends frappe-nginx service to publish port 80, can only be used with docker-compose-frappe.yml
|
- frappe-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
||||||
|
- volume: assets-vol, sites-vol
|
||||||
|
- erpnext-python: main application code
|
||||||
|
- volume: sites-vol
|
||||||
|
- frappe-socketio: enables realtime communication to the user interface through websockets
|
||||||
|
- volume: sites-vol
|
||||||
|
- frappe-worker-default: background runner
|
||||||
|
- volume: sites-vol
|
||||||
|
- frappe-worker-short: background runner for short-running jobs
|
||||||
|
- volume: sites-vol
|
||||||
|
- frappe-worker-long: background runner for long-running jobs
|
||||||
|
- volume: sites-vol
|
||||||
|
- frappe-schedule
|
||||||
|
- volume: sites-vol
|
||||||
|
|
||||||
|
- docker-compose-networks.yml: this yaml define the network to communicate with _Letsencrypt Nginx Proxy Companion_.
|
||||||
|
|
||||||
|
- erpnext-publish.yml: this yml extends erpnext-nginx service to publish port 80, can only be used with docker-compose-erpnext.yml
|
||||||
|
|
||||||
|
- frappe-publish.yml: this yml extends frappe-nginx service to publish port 80, can only be used with docker-compose-frappe.yml
|
||||||
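These files are meant to be layered with multiple `-f` flags; for example, the test scripts later in this repository bring up a Frappe bench by combining the common, frappe, and publish files:

```shell
docker-compose \
  -p frappe_bench_00 \
  -f installation/docker-compose-common.yml \
  -f installation/docker-compose-frappe.yml \
  -f installation/frappe-publish.yml \
  up -d
```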
|
|
||||||
## Updating and Migrating Sites
|
## Updating and Migrating Sites
|
||||||
|
|
||||||
|
@ -26,39 +26,39 @@ cp env-production .env
|
|||||||
To get started, copy the existing `env-local` or `env-production` file to `.env`. By default, the file will contain the following variables:
|
To get started, copy the existing `env-local` or `env-production` file to `.env`. By default, the file will contain the following variables:
|
||||||
|
|
||||||
- `ERPNEXT_VERSION=edge`
|
- `ERPNEXT_VERSION=edge`
|
||||||
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
||||||
- `FRAPPE_VERSION=edge`
|
- `FRAPPE_VERSION=edge`
|
||||||
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
- In this case, `edge` corresponds to `develop`. To set up any other version, you may use the branch name or version-specific tags (e.g. v13.0.0, version-12, v11.1.15, v11).
|
||||||
- `MARIADB_HOST=mariadb`
|
- `MARIADB_HOST=mariadb`
|
||||||
- Sets the hostname to `mariadb`. This is required if the database is managed by the containerized MariaDB instance.
|
- Sets the hostname to `mariadb`. This is required if the database is managed by the containerized MariaDB instance.
|
||||||
- `MYSQL_ROOT_PASSWORD=admin`
|
- `MYSQL_ROOT_PASSWORD=admin`
|
||||||
- Bootstraps a MariaDB container with this value set as the root password. If a managed MariaDB instance is used, there is no need to set the password here.
|
- Bootstraps a MariaDB container with this value set as the root password. If a managed MariaDB instance is used, there is no need to set the password here.
|
||||||
- In case of separately managed database setups, set the value to the database's hostname/IP/domain.
|
- In case of separately managed database setups, set the value to the database's hostname/IP/domain.
|
||||||
- `SITE_NAME=erp.example.com`
|
- `SITE_NAME=erp.example.com`
|
||||||
- Creates this site after starting all services and installs ERPNext. The site name must be resolvable by users' machines and by the ERPNext components, e.g. `erp.example.com` or `mysite.localhost`.
|
- Creates this site after starting all services and installs ERPNext. The site name must be resolvable by users' machines and by the ERPNext components, e.g. `erp.example.com` or `mysite.localhost`.
|
||||||
- ``SITES=`erp.example.com` ``
|
- `` SITES=`erp.example.com` ``
|
||||||
- List of sites that are part of the deployment "bench". Each site is separated by a comma (,) and quoted in backticks (`). By default, the site created by the ``SITE_NAME`` variable is added here.
|
- List of sites that are part of the deployment "bench". Each site is separated by a comma (,) and quoted in backticks (`). By default, the site created by the `SITE_NAME` variable is added here.
|
||||||
- If LetsEncrypt is being setup, make sure that the DNS for all the site's domains correctly point to the current instance.
|
- If LetsEncrypt is being setup, make sure that the DNS for all the site's domains correctly point to the current instance.
|
||||||
- `DB_ROOT_USER=root`
|
- `DB_ROOT_USER=root`
|
||||||
- MariaDB root username
|
- MariaDB root username
|
||||||
- `ADMIN_PASSWORD=admin`
|
- `ADMIN_PASSWORD=admin`
|
||||||
- Password for the `Administrator` user; after install, the credentials are `Administrator:$ADMIN_PASSWORD`.
|
- Password for the `Administrator` user; after install, the credentials are `Administrator:$ADMIN_PASSWORD`.
|
||||||
- `INSTALL_APPS=erpnext`
|
- `INSTALL_APPS=erpnext`
|
||||||
- Apps to install. The apps must already be in the container image; to install other applications, read the [instructions on installing custom apps](./custom-apps-for-production.md).
|
- Apps to install. The apps must already be in the container image; to install other applications, read the [instructions on installing custom apps](./custom-apps-for-production.md).
|
||||||
- `LETSENCRYPT_EMAIL=email@example.com`
|
- `LETSENCRYPT_EMAIL=email@example.com`
|
||||||
- Email for LetsEncrypt expiry notification. This is only required if you are setting up LetsEncrypt.
|
- Email for LetsEncrypt expiry notification. This is only required if you are setting up LetsEncrypt.
|
||||||
- `ENTRYPOINT_LABEL=traefik.http.routers.erpnext-nginx.entrypoints=websecure`
|
- `ENTRYPOINT_LABEL=traefik.http.routers.erpnext-nginx.entrypoints=websecure`
|
||||||
- Related to the traefik configuration; it determines whether outside traffic comes in over HTTP or HTTPS: use `web` for local development and `websecure` for production. If redirection is needed, read below.
|
- Related to the traefik configuration; it determines whether outside traffic comes in over HTTP or HTTPS: use `web` for local development and `websecure` for production. If redirection is needed, read below.
|
||||||
- `CERT_RESOLVER_LABEL=traefik.http.routers.erpnext-nginx.tls.certresolver=myresolver`
|
- `CERT_RESOLVER_LABEL=traefik.http.routers.erpnext-nginx.tls.certresolver=myresolver`
|
||||||
- Which traefik resolver to use to get TLS certificate, sets `erpnext.local.no-cert-resolver` for local setup.
|
- Which traefik resolver to use to get TLS certificate, sets `erpnext.local.no-cert-resolver` for local setup.
|
||||||
- ``HTTPS_REDIRECT_RULE_LABEL=traefik.http.routers.http-catchall.rule=hostregexp(`{host:.+}`) ``
|
- `` HTTPS_REDIRECT_RULE_LABEL=traefik.http.routers.http-catchall.rule=hostregexp(`{host:.+}`) ``
|
||||||
- Related to the traefik https redirection configuration, sets `erpnext.local.no-redirect-rule` for local setup.
|
- Related to the traefik https redirection configuration, sets `erpnext.local.no-redirect-rule` for local setup.
|
||||||
- `HTTPS_REDIRECT_ENTRYPOINT_LABEL=traefik.http.routers.http-catchall.entrypoints=web`
|
- `HTTPS_REDIRECT_ENTRYPOINT_LABEL=traefik.http.routers.http-catchall.entrypoints=web`
|
||||||
- Related to the traefik https redirection configuration, sets `erpnext.local.no-entrypoint` for local setup.
|
- Related to the traefik https redirection configuration, sets `erpnext.local.no-entrypoint` for local setup.
|
||||||
- `HTTPS_REDIRECT_MIDDLEWARE_LABEL=traefik.http.routers.http-catchall.middlewares=redirect-to-https`
|
- `HTTPS_REDIRECT_MIDDLEWARE_LABEL=traefik.http.routers.http-catchall.middlewares=redirect-to-https`
|
||||||
- Related to the traefik https redirection configuration, sets `erpnext.local.no-middleware` for local setup.
|
- Related to the traefik https redirection configuration, sets `erpnext.local.no-middleware` for local setup.
|
||||||
- `HTTPS_USE_REDIRECT_MIDDLEWARE_LABEL=traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https`
|
- `HTTPS_USE_REDIRECT_MIDDLEWARE_LABEL=traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https`
|
||||||
- Related to the traefik https redirection configuration, sets `erpnext.local-no-redirect-middleware` for local setup.
|
- Related to the traefik https redirection configuration, sets `erpnext.local-no-redirect-middleware` for local setup.
|
||||||
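As a consolidated sketch, the defaults above correspond to a `.env` along these lines (shown with the production traefik labels; swap in the `erpnext.local.*` values for a local setup):

```
ERPNEXT_VERSION=edge
FRAPPE_VERSION=edge
MARIADB_HOST=mariadb
MYSQL_ROOT_PASSWORD=admin
SITE_NAME=erp.example.com
SITES=`erp.example.com`
DB_ROOT_USER=root
ADMIN_PASSWORD=admin
INSTALL_APPS=erpnext
LETSENCRYPT_EMAIL=email@example.com
ENTRYPOINT_LABEL=traefik.http.routers.erpnext-nginx.entrypoints=websecure
CERT_RESOLVER_LABEL=traefik.http.routers.erpnext-nginx.tls.certresolver=myresolver
HTTPS_REDIRECT_RULE_LABEL=traefik.http.routers.http-catchall.rule=hostregexp(`{host:.+}`)
HTTPS_REDIRECT_ENTRYPOINT_LABEL=traefik.http.routers.http-catchall.entrypoints=web
HTTPS_REDIRECT_MIDDLEWARE_LABEL=traefik.http.routers.http-catchall.middlewares=redirect-to-https
HTTPS_USE_REDIRECT_MIDDLEWARE_LABEL=traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https
```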
|
|
||||||
Notes:
|
Notes:
|
||||||
|
|
||||||
@ -77,7 +77,7 @@ Make sure to replace `<project-name>` with the desired name you wish to set for
|
|||||||
|
|
||||||
Notes:
|
Notes:
|
||||||
|
|
||||||
- If it is the first time running and site is being initialized, *it can take multiple minutes for the site to be up*. Monitor `site-creator` container logs to check progress. Use command `docker logs <project-name>_site-creator_1 -f`
|
- If it is the first time running and site is being initialized, _it can take multiple minutes for the site to be up_. Monitor `site-creator` container logs to check progress. Use command `docker logs <project-name>_site-creator_1 -f`
|
||||||
- After the site is ready, the username is `Administrator` and the password is `$ADMIN_PASSWORD`
|
- After the site is ready, the username is `Administrator` and the password is `$ADMIN_PASSWORD`
|
||||||
- The local deployment is for testing and REST API development purposes only
|
- The local deployment is for testing and REST API development purposes only
|
||||||
- A complete development environment is available [here](../development)
|
- A complete development environment is available [here](../development)
|
||||||
@ -86,32 +86,32 @@ Notes:
|
|||||||
|
|
||||||
The docker-compose file contains the following services:
|
The docker-compose file contains the following services:
|
||||||
|
|
||||||
* traefik: manages letsencrypt
|
- traefik: manages letsencrypt
|
||||||
* volume: cert-vol
|
- volume: cert-vol
|
||||||
* redis-cache: cache store
|
- redis-cache: cache store
|
||||||
* volume: redis-cache-vol
|
- volume: redis-cache-vol
|
||||||
* redis-queue: used by workers
|
- redis-queue: used by workers
|
||||||
* volume: redis-queue-vol
|
- volume: redis-queue-vol
|
||||||
* redis-socketio: used by socketio service
|
- redis-socketio: used by socketio service
|
||||||
* volume: redis-socketio-vol
|
- volume: redis-socketio-vol
|
||||||
* mariadb: main database
|
- mariadb: main database
|
||||||
* volume: mariadb-vol
|
- volume: mariadb-vol
|
||||||
* erpnext-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
- erpnext-nginx: serves static assets and proxies web request to the appropriate container, allowing to offer all services on the same port.
|
||||||
* volume: assets-vol and sites-vol
|
- volume: assets-vol and sites-vol
|
||||||
* erpnext-python: main application code
|
- erpnext-python: main application code
|
||||||
* volume: sites-vol and sites-vol
|
- volume: sites-vol and sites-vol
|
||||||
* frappe-socketio: enables realtime communication to the user interface through websockets
|
- frappe-socketio: enables realtime communication to the user interface through websockets
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
* erpnext-worker-default: background runner
|
- erpnext-worker-default: background runner
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
* erpnext-worker-short: background runner for short-running jobs
|
- erpnext-worker-short: background runner for short-running jobs
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
* erpnext-worker-long: background runner for long-running jobs
|
- erpnext-worker-long: background runner for long-running jobs
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
* erpnext-schedule
|
- erpnext-schedule
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
* site-creator: run once container to create new site.
|
- site-creator: run once container to create new site.
|
||||||
* volume: sites-vol
|
- volume: sites-vol
|
||||||
|
|
||||||
## Updating and Migrating Sites
|
## Updating and Migrating Sites
|
||||||
|
|
||||||
|
@ -13,8 +13,8 @@ Or specify environment variables instead of passing secrets as command arguments
|
|||||||
Note:
|
Note:
|
||||||
|
|
||||||
- Wait for the database service to start before trying to create a new site.
|
- Wait for the database service to start before trying to create a new site.
|
||||||
- If new site creation fails, retry after the MariaDB container is up and running.
|
- If new site creation fails, retry after the MariaDB container is up and running.
|
||||||
- If you're using a managed database instance, make sure that the database is running before setting up a new site.
|
- If you're using a managed database instance, make sure that the database is running before setting up a new site.
|
||||||
|
|
||||||
#### MariaDB Site
|
#### MariaDB Site
|
||||||
|
|
||||||
@ -72,7 +72,7 @@ Notes:
|
|||||||
|
|
||||||
## Add sites to proxy
|
## Add sites to proxy
|
||||||
|
|
||||||
Change the `SITES` variable to the list of sites created, encapsulated in backticks and separated by commas with no spaces, e.g. ``SITES=`site1.example.com`,`site2.example.com` ``.
|
Change the `SITES` variable to the list of sites created, encapsulated in backticks and separated by commas with no spaces, e.g. `` SITES=`site1.example.com`,`site2.example.com` ``.
|
||||||
|
|
||||||
Reload variables with the following command.
|
Reload variables with the following command.
|
||||||
|
|
||||||
@ -165,19 +165,19 @@ Note:
|
|||||||
- Volume must be mounted at location `/home/frappe/backups` for restoring sites
|
- Volume must be mounted at location `/home/frappe/backups` for restoring sites
|
||||||
- If no backup files are found in the volume, it will use S3 credentials to pull backups (see the sketch after this list)
|
- If no backup files are found in the volume, it will use S3 credentials to pull backups (see the sketch after this list)
|
||||||
- Backup structure for mounted volume or downloaded from s3:
|
- Backup structure for mounted volume or downloaded from s3:
|
||||||
- /home/frappe/backups
|
- /home/frappe/backups
|
||||||
- site1.domain.com
|
- site1.domain.com
|
||||||
- 20200420_162000
|
- 20200420_162000
|
||||||
- 20200420_162000-site1_domain_com-*
|
- 20200420_162000-site1_domain_com-\*
|
||||||
- site2.domain.com
|
- site2.domain.com
|
||||||
- 20200420_162000
|
- 20200420_162000
|
||||||
- 20200420_162000-site2_domain_com-*
|
- 20200420_162000-site2_domain_com-\*
|
||||||
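A sketch of a restore that pulls backups from S3-compatible storage; the environment variables mirror the push-backup invocation used in this repository's tests, while the `restore-backup` command, the backups mount, and the credential values are assumptions to verify against your worker image:

```shell
docker run --rm \
  -e MYSQL_ROOT_PASSWORD=longsecretpassword \
  -e BUCKET_NAME=frappe \
  -e REGION=us-east-1 \
  -e BUCKET_DIR=local \
  -e ACCESS_KEY_ID=yourkey \
  -e SECRET_ACCESS_KEY=yoursecret \
  -e ENDPOINT_URL=https://s3.example.com \
  -v <project-name>_sites-vol:/home/frappe/frappe-bench/sites \
  -v backups-vol:/home/frappe/backups \
  --network <project-name>_default \
  frappe/frappe-worker:edge restore-backup  # command name assumed for illustration
```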
|
|
||||||
## Edit configs
|
## Edit configs
|
||||||
|
|
||||||
Editing config manually might be required in some cases;
|
Editing config manually might be required in some cases;
|
||||||
one such case is using Amazon RDS (or any other DBaaS).
|
one such case is using Amazon RDS (or any other DBaaS).
|
||||||
For full instructions, refer to the [wiki](https://github.com/frappe/frappe/wiki/Using-Frappe-with-Amazon-RDS-(or-any-other-DBaaS)). Common questions can be found in Issues and on the forum.
|
For full instructions, refer to the [wiki](<https://github.com/frappe/frappe/wiki/Using-Frappe-with-Amazon-RDS-(or-any-other-DBaaS)>). Common questions can be found in Issues and on the forum.
|
||||||
|
|
||||||
`common_site_config.json` or `site_config.json` from the `sites-vol` volume has to be edited using the following command:
|
`common_site_config.json` or `site_config.json` from the `sites-vol` volume has to be edited using the following command:
|
||||||
|
|
||||||
@ -231,7 +231,6 @@ Notes:
|
|||||||
- Use it to install/uninstall custom apps, add a system manager user, etc.
|
- Use it to install/uninstall custom apps, add a system manager user, etc.
|
||||||
- To run the command as a non-root user, add the command option `--user frappe`.
|
- To run the command as a non-root user, add the command option `--user frappe`.
|
||||||
|
|
||||||
|
|
||||||
## Delete/Drop Site
|
## Delete/Drop Site
|
||||||
|
|
||||||
#### MariaDB Site
|
#### MariaDB Site
|
||||||
|
@ -1,15 +1,15 @@
|
|||||||
# Tips for moving deployments
|
# Tips for moving deployments
|
||||||
|
|
||||||
- Take regular automatic backups and push the files to S3-compatible cloud storage. Set up backup and push with cronjobs (see the sketch after this list)
|
- Take regular automatic backups and push the files to S3-compatible cloud storage. Set up backup and push with cronjobs (see the sketch after this list)
|
||||||
- Use regular cron for single machine installs
|
- Use regular cron for single machine installs
|
||||||
- Use [swarm-cronjob](https://github.com/crazy-max/swarm-cronjob) for docker swarm
|
- Use [swarm-cronjob](https://github.com/crazy-max/swarm-cronjob) for docker swarm
|
||||||
- Use Kubernetes CronJob
|
- Use Kubernetes CronJob
|
||||||
- It makes it easy to transfer data from cloud to any new deployment.
|
- It makes it easy to transfer data from cloud to any new deployment.
|
||||||
- They are just [site operations](site-operations.md) that can be manually pipelined as per need.
|
- They are just [site operations](site-operations.md) that can be manually pipelined as per need.
|
||||||
- Remember to restore encryption keys and other custom configuration from `site_config.json`.
|
- Remember to restore encryption keys and other custom configuration from `site_config.json`.
|
||||||
- Steps to move deployment:
|
- Steps to move deployment:
|
||||||
- [Take backup](site-operations.md#backup-sites)
|
- [Take backup](site-operations.md#backup-sites)
|
||||||
- [Push backup to cloud](site-operations.md#push-backup-to-s3-compatible-storage)
|
- [Push backup to cloud](site-operations.md#push-backup-to-s3-compatible-storage)
|
||||||
- Create new deployment type anywhere
|
- Create new deployment type anywhere
|
||||||
- [Restore backup from cloud](site-operations.md#restore-backups)
|
- [Restore backup from cloud](site-operations.md#restore-backups)
|
||||||
- [Restore `site_config.json` from cloud](site-operations.md#edit-configs)
|
- [Restore `site_config.json` from cloud](site-operations.md#edit-configs)
|
||||||
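A minimal single-machine sketch of the cron approach mentioned above, reusing the `backup` and `push-backup` worker commands and the `frappe_bench_00` project name from this repository's tests; the schedule, bucket, and credentials are placeholders:

```shell
#!/bin/bash
# backup-and-push.sh -- back up all sites, then push the files to S3-compatible storage
docker run --rm -e WITH_FILES=1 \
  -v frappe_bench_00_sites-vol:/home/frappe/frappe-bench/sites \
  --network frappe_bench_00_default \
  frappe/frappe-worker:edge backup

docker run --rm \
  -e BUCKET_NAME=frappe -e REGION=us-east-1 -e BUCKET_DIR=local \
  -e ACCESS_KEY_ID=yourkey -e SECRET_ACCESS_KEY=yoursecret \
  -e ENDPOINT_URL=https://s3.example.com \
  -v frappe_bench_00_sites-vol:/home/frappe/frappe-bench/sites \
  --network frappe_bench_00_default \
  frappe/frappe-worker:edge push-backup

# crontab entry, e.g. every 6 hours:
# 0 */6 * * * /path/to/backup-and-push.sh
```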
|
@ -1,4 +1,4 @@
|
|||||||
version: '3'
|
version: "3"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
redis-cache:
|
redis-cache:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
version: '3'
|
version: "3"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
erpnext-nginx:
|
erpnext-nginx:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
version: '3'
|
version: "3"
|
||||||
|
|
||||||
services:
|
services:
|
||||||
frappe-nginx:
|
frappe-nginx:
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
version: '3'
|
version: "3"
|
||||||
|
|
||||||
networks:
|
networks:
|
||||||
default:
|
default:
|
||||||
|
@ -1,26 +1,26 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
|
|
||||||
print_group() {
|
print_group() {
|
||||||
echo ::endgroup::
|
echo ::endgroup::
|
||||||
echo "::group::$*"
|
echo "::group::$*"
|
||||||
}
|
}
|
||||||
|
|
||||||
ping_site() {
|
ping_site() {
|
||||||
print_group "Ping site $SITE_NAME"
|
print_group "Ping site $SITE_NAME"
|
||||||
|
|
||||||
echo Ping version
|
echo Ping version
|
||||||
ping_res=$(curl -sS "http://$SITE_NAME/api/method/version")
|
ping_res=$(curl -sS "http://$SITE_NAME/api/method/version")
|
||||||
echo "$ping_res"
|
echo "$ping_res"
|
||||||
if [[ -z $(echo "$ping_res" | grep "message" || echo "") ]]; then
|
if [[ -z $(echo "$ping_res" | grep "message" || echo "") ]]; then
|
||||||
echo "Ping failed"
|
echo "Ping failed"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
echo Check index
|
echo Check index
|
||||||
index_res=$(curl -sS "http://$SITE_NAME")
|
index_res=$(curl -sS "http://$SITE_NAME")
|
||||||
if [[ -n $(echo "$index_res" | grep "Internal Server Error" || echo "") ]]; then
|
if [[ -n $(echo "$index_res" | grep "Internal Server Error" || echo "") ]]; then
|
||||||
echo "Index check failed"
|
echo "Index check failed"
|
||||||
echo "$index_res"
|
echo "$index_res"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
@ -7,63 +7,63 @@ source tests/functions.sh
|
|||||||
project_name=frappe_bench_00
|
project_name=frappe_bench_00
|
||||||
|
|
||||||
docker_compose_with_args() {
|
docker_compose_with_args() {
|
||||||
# shellcheck disable=SC2068
|
# shellcheck disable=SC2068
|
||||||
docker-compose \
|
docker-compose \
|
||||||
-p $project_name \
|
-p $project_name \
|
||||||
-f installation/docker-compose-common.yml \
|
-f installation/docker-compose-common.yml \
|
||||||
-f installation/docker-compose-frappe.yml \
|
-f installation/docker-compose-frappe.yml \
|
||||||
-f installation/frappe-publish.yml \
|
-f installation/frappe-publish.yml \
|
||||||
$@
|
$@
|
||||||
}
|
}
|
||||||
|
|
||||||
check_migration_complete() {
|
check_migration_complete() {
|
||||||
print_group Check migration
|
print_group Check migration
|
||||||
|
|
||||||
container_id=$(docker_compose_with_args ps -q frappe-python)
|
container_id=$(docker_compose_with_args ps -q frappe-python)
|
||||||
cmd="docker logs ${container_id} 2>&1 | grep 'Starting gunicorn' || echo ''"
|
cmd="docker logs ${container_id} 2>&1 | grep 'Starting gunicorn' || echo ''"
|
||||||
|
worker_log=$(eval "$cmd")
|
||||||
|
INCREMENT=0
|
||||||
|
|
||||||
|
while [[ ${worker_log} != *"Starting gunicorn"* && ${INCREMENT} -lt 120 ]]; do
|
||||||
|
sleep 3
|
||||||
|
((INCREMENT = INCREMENT + 1))
|
||||||
|
echo "Wait for migration to complete..."
|
||||||
worker_log=$(eval "$cmd")
|
worker_log=$(eval "$cmd")
|
||||||
INCREMENT=0
|
if [[ ${worker_log} != *"Starting gunicorn"* && ${INCREMENT} -eq 120 ]]; then
|
||||||
|
echo Migration timeout
|
||||||
|
docker logs "${container_id}"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
while [[ ${worker_log} != *"Starting gunicorn"* && ${INCREMENT} -lt 120 ]]; do
|
echo Migration Log
|
||||||
sleep 3
|
docker logs "${container_id}"
|
||||||
((INCREMENT = INCREMENT + 1))
|
|
||||||
echo "Wait for migration to complete..."
|
|
||||||
worker_log=$(eval "$cmd")
|
|
||||||
if [[ ${worker_log} != *"Starting gunicorn"* && ${INCREMENT} -eq 120 ]]; then
|
|
||||||
echo Migration timeout
|
|
||||||
docker logs "${container_id}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
|
|
||||||
echo Migration Log
|
|
||||||
docker logs "${container_id}"
|
|
||||||
}
|
}
|
||||||
|
|
||||||
check_health() {
|
check_health() {
|
||||||
print_group Loop health check
|
print_group Loop health check
|
||||||
|
|
||||||
docker run --name frappe_doctor \
|
docker run --name frappe_doctor \
|
||||||
-v "${project_name}_sites-vol:/home/frappe/frappe-bench/sites" \
|
-v "${project_name}_sites-vol:/home/frappe/frappe-bench/sites" \
|
||||||
--network "${project_name}_default" \
|
--network "${project_name}_default" \
|
||||||
frappe/frappe-worker:edge doctor || true
|
frappe/frappe-worker:edge doctor || true
|
||||||
|
|
||||||
cmd='docker logs frappe_doctor | grep "Health check successful" || echo ""'
|
cmd='docker logs frappe_doctor | grep "Health check successful" || echo ""'
|
||||||
|
doctor_log=$(eval "$cmd")
|
||||||
|
INCREMENT=0
|
||||||
|
|
||||||
|
while [[ -z "${doctor_log}" && ${INCREMENT} -lt 60 ]]; do
|
||||||
|
sleep 1
|
||||||
|
((INCREMENT = INCREMENT + 1))
|
||||||
|
container=$(docker start frappe_doctor)
|
||||||
|
echo "Restarting ${container}..."
|
||||||
doctor_log=$(eval "$cmd")
|
doctor_log=$(eval "$cmd")
|
||||||
INCREMENT=0
|
|
||||||
|
|
||||||
while [[ -z "${doctor_log}" && ${INCREMENT} -lt 60 ]]; do
|
if [[ ${INCREMENT} -eq 60 ]]; then
|
||||||
sleep 1
|
docker logs "${container}"
|
||||||
((INCREMENT = INCREMENT + 1))
|
exit 1
|
||||||
container=$(docker start frappe_doctor)
|
fi
|
||||||
echo "Restarting ${container}..."
|
done
|
||||||
doctor_log=$(eval "$cmd")
|
|
||||||
|
|
||||||
if [[ ${INCREMENT} -eq 60 ]]; then
|
|
||||||
docker logs "${container}"
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
done
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Initial group
|
# Initial group
|
||||||
@ -81,23 +81,23 @@ docker_compose_with_args up -d --quiet-pull
|
|||||||
echo Start postgres
|
echo Start postgres
|
||||||
docker pull postgres:11.8 -q
|
docker pull postgres:11.8 -q
|
||||||
docker run \
|
docker run \
|
||||||
--name postgresql \
|
--name postgresql \
|
||||||
-d \
|
-d \
|
||||||
-e POSTGRES_PASSWORD=admin \
|
-e POSTGRES_PASSWORD=admin \
|
||||||
-v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
|
-v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
|
||||||
--network ${project_name}_default \
|
--network ${project_name}_default \
|
||||||
postgres:11.8
|
postgres:11.8
|
||||||
|
|
||||||
check_health
|
check_health
|
||||||
|
|
||||||
print_group "Create new site "
|
print_group "Create new site "
|
||||||
SITE_NAME=test.localhost
|
SITE_NAME=test.localhost
|
||||||
docker run \
|
docker run \
|
||||||
--rm \
|
--rm \
|
||||||
-e SITE_NAME=$SITE_NAME \
|
-e SITE_NAME=$SITE_NAME \
|
||||||
-v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
|
-v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
|
||||||
--network ${project_name}_default \
|
--network ${project_name}_default \
|
||||||
frappe/frappe-worker:v13 new
|
frappe/frappe-worker:v13 new
|
||||||
|
|
||||||
ping_site
|
ping_site
|
||||||
|
|
||||||
@ -118,25 +118,25 @@ ping_site
PG_SITE_NAME=pgsql.localhost
print_group "Create new site (Postgres)"
docker run \
  --rm \
  -e SITE_NAME=$PG_SITE_NAME \
  -e POSTGRES_HOST=postgresql \
  -e DB_ROOT_USER=postgres \
  -e POSTGRES_PASSWORD=admin \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge new

check_migration_complete
SITE_NAME=$PG_SITE_NAME ping_site

print_group Backup site
docker run \
  --rm \
  -e WITH_FILES=1 \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge backup

MINIO_ACCESS_KEY="AKIAIOSFODNN7EXAMPLE"
MINIO_SECRET_KEY="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
@ -144,39 +144,39 @@ MINIO_SECRET_KEY="wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"
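# Stand up MinIO as an S3-compatible endpoint, create a "frappe" bucket with
# s3cmd, push the backups to it with push-backup, then stop the compose stack so
# the services can be pruned and restarted.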
print_group Prepare S3 server
echo Start S3 server
docker run \
  --name minio \
  -d \
  -e "MINIO_ACCESS_KEY=$MINIO_ACCESS_KEY" \
  -e "MINIO_SECRET_KEY=$MINIO_SECRET_KEY" \
  --network ${project_name}_default \
  minio/minio server /data

echo Create bucket
docker run \
  --rm \
  --network ${project_name}_default \
  vltgroup/s3cmd:latest \
  s3cmd \
  --access_key=$MINIO_ACCESS_KEY \
  --secret_key=$MINIO_SECRET_KEY \
  --region=us-east-1 \
  --no-ssl \
  --host=minio:9000 \
  --host-bucket=minio:9000 \
  mb s3://frappe

print_group Push backup
docker run \
  --rm \
  -e BUCKET_NAME=frappe \
  -e REGION=us-east-1 \
  -e BUCKET_DIR=local \
  -e ACCESS_KEY_ID=$MINIO_ACCESS_KEY \
  -e SECRET_ACCESS_KEY=$MINIO_SECRET_KEY \
  -e ENDPOINT_URL=http://minio:9000 \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge push-backup

print_group Prune and restart services
docker_compose_with_args stop
@ -187,17 +187,17 @@ check_health
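# Pull the backups back from MinIO with restore-backup, then confirm the bench is
# healthy and the restored site still answers pings.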

print_group Restore backup from S3
docker run \
  --rm \
  -e MYSQL_ROOT_PASSWORD=admin \
  -e BUCKET_NAME=frappe \
  -e BUCKET_DIR=local \
  -e ACCESS_KEY_ID=$MINIO_ACCESS_KEY \
  -e SECRET_ACCESS_KEY=$MINIO_SECRET_KEY \
  -e ENDPOINT_URL=http://minio:9000 \
  -e REGION=us-east-1 \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge restore-backup

check_health
ping_site
@ -206,78 +206,78 @@ SITE_NAME=$PG_SITE_NAME ping_site
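# Exercise the edge image end to end: create an edge site, run a migration in
# maintenance mode, restore the S3 backup over the existing sites, then check the
# console, drop and bench --help commands.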
EDGE_SITE_NAME=edge.localhost
print_group "Create new site (edge)"
docker run \
  --rm \
  -e SITE_NAME=$EDGE_SITE_NAME \
  -e INSTALL_APPS=frappe \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge new

check_health
SITE_NAME=$EDGE_SITE_NAME ping_site

print_group Migrate edge site
docker run \
  --rm \
  -e MAINTENANCE_MODE=1 \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  -v ${project_name}_assets-vol:/home/frappe/frappe-bench/sites/assets \
  --network ${project_name}_default \
  frappe/frappe-worker:edge migrate

check_migration_complete

print_group "Restore backup S3 (overwrite)"
docker run \
  --rm \
  -e MYSQL_ROOT_PASSWORD=admin \
  -e BUCKET_NAME=frappe \
  -e BUCKET_DIR=local \
  -e ACCESS_KEY_ID=$MINIO_ACCESS_KEY \
  -e SECRET_ACCESS_KEY=$MINIO_SECRET_KEY \
  -e ENDPOINT_URL=http://minio:9000 \
  -e REGION=us-east-1 \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge restore-backup

check_migration_complete
ping_site

print_group "Check console for $SITE_NAME"
docker run \
  --rm \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge console $SITE_NAME

print_group "Check console for $PG_SITE_NAME"
docker run \
  --rm \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge console $PG_SITE_NAME

print_group "Check drop site for $SITE_NAME (MariaDB)"
docker run \
  --rm \
  -e SITE_NAME=$SITE_NAME \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge drop

print_group "Check drop site for $PG_SITE_NAME (Postgres)"
docker run \
  --rm \
  -e SITE_NAME=$PG_SITE_NAME \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:edge drop

print_group Check bench --help
docker run \
  --rm \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  --user frappe \
  frappe/frappe-worker:edge bench --help
@ -16,29 +16,29 @@ cat .env
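# ERPNext variant of the smoke test: bring up the compose stack, fix volume
# ownership as root, create a site with the erpnext app installed and ping it.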

print_group Start services
docker-compose \
  -p $project_name \
  -f installation/docker-compose-common.yml \
  -f installation/docker-compose-erpnext.yml \
  -f installation/erpnext-publish.yml \
  up -d

print_group Fix permissions
docker run \
  --rm \
  --user root \
  -v ${project_name}_sites-vol:/sites \
  -v ${project_name}_assets-vol:/assets \
  -v ${project_name}_logs-vol:/logs \
  frappe/erpnext-worker:test chown -R 1000:1000 /logs /sites /assets

print_group Create site
docker run \
  --rm \
  -e "SITE_NAME=$SITE_NAME" \
  -e "INSTALL_APPS=erpnext" \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/erpnext-worker:test new

ping_site
rm .env
@ -8,13 +8,13 @@ project_name="test_frappe"
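# Wrapper around docker-compose that pins the project name and the frappe compose
# files while passing any remaining arguments straight through.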
SITE_NAME="test_frappe.localhost"

docker_compose_with_args() {
  # shellcheck disable=SC2068
  docker-compose \
    -p $project_name \
    -f installation/docker-compose-common.yml \
    -f installation/docker-compose-frappe.yml \
    -f installation/frappe-publish.yml \
    $@
}

echo ::group::Setup env
@ -29,11 +29,11 @@ docker_compose_with_args up -d
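# Create the plain frappe test site with the test worker image and ping it.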

print_group Create site
docker run \
  --rm \
  -e "SITE_NAME=$SITE_NAME" \
  -v ${project_name}_sites-vol:/home/frappe/frappe-bench/sites \
  --network ${project_name}_default \
  frappe/frappe-worker:test new

ping_site