diff --git a/apiserver/.env.example b/apiserver/.env.example index 4969f1766..8772dfd53 100644 --- a/apiserver/.env.example +++ b/apiserver/.env.example @@ -1,7 +1,7 @@ # Backend # Debug value for api server use it as 0 for production use DEBUG=0 -DJANGO_SETTINGS_MODULE="plane.settings.selfhosted" +DJANGO_SETTINGS_MODULE="plane.settings.production" # Error logs SENTRY_DSN="" @@ -59,3 +59,6 @@ DEFAULT_PASSWORD="password123" # SignUps ENABLE_SIGNUP="1" + +# Email redirections and minio domain settings +WEB_URL="http://localhost" diff --git a/apiserver/plane/settings/production.py b/apiserver/plane/settings/production.py index e434f9742..170a2067c 100644 --- a/apiserver/plane/settings/production.py +++ b/apiserver/plane/settings/production.py @@ -7,6 +7,7 @@ import dj_database_url import sentry_sdk from sentry_sdk.integrations.django import DjangoIntegration from sentry_sdk.integrations.redis import RedisIntegration +from urllib.parse import urlparse from .common import * # noqa @@ -89,90 +90,112 @@ if bool(os.environ.get("SENTRY_DSN", False)): profiles_sample_rate=1.0, ) -# The AWS region to connect to. -AWS_REGION = os.environ.get("AWS_REGION", "") +if DOCKERIZED and USE_MINIO: + INSTALLED_APPS += ("storages",) + STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"} + # The AWS access key to use. + AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "access-key") + # The AWS secret access key to use. + AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "secret-key") + # The name of the bucket to store files in. + AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "uploads") + # The full URL to the S3 endpoint. Leave blank to use the default region URL. + AWS_S3_ENDPOINT_URL = os.environ.get( + "AWS_S3_ENDPOINT_URL", "http://plane-minio:9000" + ) + # Default permissions + AWS_DEFAULT_ACL = "public-read" + AWS_QUERYSTRING_AUTH = False + AWS_S3_FILE_OVERWRITE = False -# The AWS access key to use. -AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "") + # Custom Domain settings + parsed_url = urlparse(os.environ.get("WEB_URL", "http://localhost")) + AWS_S3_CUSTOM_DOMAIN = f"{parsed_url.netloc}/{AWS_STORAGE_BUCKET_NAME}" + AWS_S3_URL_PROTOCOL = f"{parsed_url.scheme}:" +else: + # The AWS region to connect to. + AWS_REGION = os.environ.get("AWS_REGION", "") -# The AWS secret access key to use. -AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "") + # The AWS access key to use. + AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "") -# The optional AWS session token to use. -# AWS_SESSION_TOKEN = "" + # The AWS secret access key to use. + AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "") -# The name of the bucket to store files in. -AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME") + # The optional AWS session token to use. + # AWS_SESSION_TOKEN = "" -# How to construct S3 URLs ("auto", "path", "virtual"). -AWS_S3_ADDRESSING_STYLE = "auto" + # The name of the bucket to store files in. + AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME") -# The full URL to the S3 endpoint. Leave blank to use the default region URL. -AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "") + # How to construct S3 URLs ("auto", "path", "virtual"). + AWS_S3_ADDRESSING_STYLE = "auto" -# A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator. -AWS_S3_KEY_PREFIX = "" + # The full URL to the S3 endpoint. Leave blank to use the default region URL. 
+ AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "") -# Whether to enable authentication for stored files. If True, then generated URLs will include an authentication -# token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token, -# and their permissions will be set to "public-read". -AWS_S3_BUCKET_AUTH = False + # A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator. + AWS_S3_KEY_PREFIX = "" -# How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH` -# is True. It also affects the "Cache-Control" header of the files. -# Important: Changing this setting will not affect existing files. -AWS_S3_MAX_AGE_SECONDS = 60 * 60 # 1 hours. + # Whether to enable authentication for stored files. If True, then generated URLs will include an authentication + # token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token, + # and their permissions will be set to "public-read". + AWS_S3_BUCKET_AUTH = False -# A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting -# cannot be used with `AWS_S3_BUCKET_AUTH`. -AWS_S3_PUBLIC_URL = "" + # How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH` + # is True. It also affects the "Cache-Control" header of the files. + # Important: Changing this setting will not affect existing files. + AWS_S3_MAX_AGE_SECONDS = 60 * 60 # 1 hours. -# If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you -# understand the consequences before enabling. -# Important: Changing this setting will not affect existing files. -AWS_S3_REDUCED_REDUNDANCY = False + # A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting + # cannot be used with `AWS_S3_BUCKET_AUTH`. + AWS_S3_PUBLIC_URL = "" -# The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a -# single `name` argument. -# Important: Changing this setting will not affect existing files. -AWS_S3_CONTENT_DISPOSITION = "" + # If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you + # understand the consequences before enabling. + # Important: Changing this setting will not affect existing files. + AWS_S3_REDUCED_REDUNDANCY = False -# The Content-Language header used when the file is downloaded. This can be a string, or a function taking a -# single `name` argument. -# Important: Changing this setting will not affect existing files. -AWS_S3_CONTENT_LANGUAGE = "" + # The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a + # single `name` argument. + # Important: Changing this setting will not affect existing files. + AWS_S3_CONTENT_DISPOSITION = "" -# A mapping of custom metadata for each file. Each value can be a string, or a function taking a -# single `name` argument. -# Important: Changing this setting will not affect existing files. -AWS_S3_METADATA = {} + # The Content-Language header used when the file is downloaded. This can be a string, or a function taking a + # single `name` argument. + # Important: Changing this setting will not affect existing files. 
+ AWS_S3_CONTENT_LANGUAGE = "" -# If True, then files will be stored using AES256 server-side encryption. -# If this is a string value (e.g., "aws:kms"), that encryption type will be used. -# Otherwise, server-side encryption is not be enabled. -# Important: Changing this setting will not affect existing files. -AWS_S3_ENCRYPT_KEY = False + # A mapping of custom metadata for each file. Each value can be a string, or a function taking a + # single `name` argument. + # Important: Changing this setting will not affect existing files. + AWS_S3_METADATA = {} -# The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present. -# This is only relevant if AWS S3 KMS server-side encryption is enabled (above). -# AWS_S3_KMS_ENCRYPTION_KEY_ID = "" + # If True, then files will be stored using AES256 server-side encryption. + # If this is a string value (e.g., "aws:kms"), that encryption type will be used. + # Otherwise, server-side encryption is not be enabled. + # Important: Changing this setting will not affect existing files. + AWS_S3_ENCRYPT_KEY = False -# If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their -# compressed size is smaller than their uncompressed size. -# Important: Changing this setting will not affect existing files. -AWS_S3_GZIP = True + # The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present. + # This is only relevant if AWS S3 KMS server-side encryption is enabled (above). + # AWS_S3_KMS_ENCRYPTION_KEY_ID = "" -# The signature version to use for S3 requests. -AWS_S3_SIGNATURE_VERSION = None + # If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their + # compressed size is smaller than their uncompressed size. + # Important: Changing this setting will not affect existing files. + AWS_S3_GZIP = True -# If True, then files with the same name will overwrite each other. By default it's set to False to have -# extra characters appended. -AWS_S3_FILE_OVERWRITE = False + # The signature version to use for S3 requests. + AWS_S3_SIGNATURE_VERSION = None -STORAGES["default"] = { - "BACKEND": "django_s3_storage.storage.S3Storage", -} + # If True, then files with the same name will overwrite each other. By default it's set to False to have + # extra characters appended. 
+ AWS_S3_FILE_OVERWRITE = False + STORAGES["default"] = { + "BACKEND": "django_s3_storage.storage.S3Storage", + } # AWS Settings End # Enable Connection Pooling (if desired) @@ -193,16 +216,27 @@ CSRF_COOKIE_SECURE = True REDIS_URL = os.environ.get("REDIS_URL") -CACHES = { - "default": { - "BACKEND": "django_redis.cache.RedisCache", - "LOCATION": REDIS_URL, - "OPTIONS": { - "CLIENT_CLASS": "django_redis.client.DefaultClient", - "CONNECTION_POOL_KWARGS": {"ssl_cert_reqs": False}, - }, +if DOCKERIZED: + CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": REDIS_URL, + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + }, + } + } +else: + CACHES = { + "default": { + "BACKEND": "django_redis.cache.RedisCache", + "LOCATION": REDIS_URL, + "OPTIONS": { + "CLIENT_CLASS": "django_redis.client.DefaultClient", + "CONNECTION_POOL_KWARGS": {"ssl_cert_reqs": False}, + }, + } } -} WEB_URL = os.environ.get("WEB_URL", "https://app.plane.so") @@ -225,8 +259,12 @@ broker_url = ( f"{redis_url}?ssl_cert_reqs={ssl.CERT_NONE.name}&ssl_ca_certs={certifi.where()}" ) -CELERY_RESULT_BACKEND = broker_url -CELERY_BROKER_URL = broker_url +if DOCKERIZED: + CELERY_BROKER_URL = REDIS_URL + CELERY_RESULT_BACKEND = REDIS_URL +else: + CELERY_BROKER_URL = broker_url + CELERY_RESULT_BACKEND = broker_url GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", False) @@ -237,4 +275,3 @@ ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "1") == "1" SCOUT_MONITOR = os.environ.get("SCOUT_MONITOR", False) SCOUT_KEY = os.environ.get("SCOUT_KEY", "") SCOUT_NAME = "Plane" -
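
A minimal sketch (plain Python, assuming the django-storages S3Boto3Storage backend combines AWS_S3_URL_PROTOCOL and AWS_S3_CUSTOM_DOMAIN into object URLs) of what the new DOCKERIZED/USE_MINIO branch resolves to with the defaults in this patch; the uploaded file name is made up for illustration:

# Mirrors the defaults used above: WEB_URL="http://localhost",
# AWS_S3_BUCKET_NAME="uploads", AWS_S3_ENDPOINT_URL="http://plane-minio:9000".
from urllib.parse import urlparse

web_url = "http://localhost"                 # WEB_URL default from .env.example
bucket_name = "uploads"                      # AWS_S3_BUCKET_NAME default

parsed_url = urlparse(web_url)
aws_s3_custom_domain = f"{parsed_url.netloc}/{bucket_name}"   # "localhost/uploads"
aws_s3_url_protocol = f"{parsed_url.scheme}:"                 # "http:"

# django-storages builds public object URLs roughly as
#   {AWS_S3_URL_PROTOCOL}//{AWS_S3_CUSTOM_DOMAIN}/<key>
# so with these defaults a file is served from http://localhost/uploads/<key>,
# which is why the reverse proxy in front of the stack must route /uploads to
# the plane-minio container, while the API itself talks to MinIO directly via
# AWS_S3_ENDPOINT_URL.
print(f"{aws_s3_url_protocol}//{aws_s3_custom_domain}/example.png")
# -> http://localhost/uploads/example.png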
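
The cache and Celery hunks follow the same pattern: inside docker-compose Redis speaks plain TCP, so the TLS options that the hosted configuration appends to broker_url (and the ssl_cert_reqs connection-pool kwarg) are dropped. A rough sketch of the two broker URLs the new branch produces, using hypothetical hostnames that are not taken from the repository:

# Illustration only; the hostnames below are invented for the example.
import ssl
import certifi

def celery_broker_url(redis_url: str, dockerized: bool) -> str:
    if dockerized:
        # Plain Redis container inside docker-compose: use REDIS_URL as-is.
        return redis_url
    # Hosted Redis over TLS: mirror production.py and append the cert options.
    return (
        f"{redis_url}?ssl_cert_reqs={ssl.CERT_NONE.name}"
        f"&ssl_ca_certs={certifi.where()}"
    )

print(celery_broker_url("redis://plane-redis:6379/", dockerized=True))
# redis://plane-redis:6379/
print(celery_broker_url("rediss://:secret@managed-redis.example.com:6380", dockerized=False))
# rediss://:secret@managed-redis.example.com:6380?ssl_cert_reqs=CERT_NONE&ssl_ca_certs=<certifi bundle path>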
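
The DOCKERIZED and USE_MINIO flags these branches test are defined outside this patch (in plane/settings/common.py), so the helper below is only a hypothetical illustration of the 0/1 environment-flag pattern the file already uses for ENABLE_SIGNUP, not a copy of the real definitions:

import os

def env_flag(name: str, default: str = "0") -> bool:
    # Hypothetical helper: treat "1" as true and anything else as false,
    # matching the ENABLE_SIGNUP line near the end of production.py.
    return os.environ.get(name, default) == "1"

DOCKERIZED = env_flag("DOCKERIZED")
USE_MINIO = env_flag("USE_MINIO")

if DOCKERIZED and USE_MINIO:
    print("using the MinIO branch (storages.backends.s3boto3)")
else:
    print("using the AWS branch (django_s3_storage)")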