Compare commits


14 Commits

Author SHA1 Message Date
NarayanBavisetti 4ce3fa356a fix: changed migration and added public s3 2023-10-27 12:50:12 +05:30
NarayanBavisetti 2628890068 Merge branch 'develop' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-10-27 12:19:33 +05:30
pablohashescobar 0d07ecb337 dev: rearrange migrations and add class calls 2023-09-25 15:08:09 +05:30
pablohashescobar 8745f20ef4 Merge branch 'dev/private_bucket_for_attachments' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-09-25 13:40:56 +05:30
pablohashescobar 05578e4729 Merge branch 'develop' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-09-25 13:40:15 +05:30
pablohashescobar b3a755a311 dev: fix migration inconsistency 2023-09-25 13:38:42 +05:30
pablohashescobar c12cb26c4d Merge branch 'develop' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-09-25 13:20:10 +05:30
pablohashescobar 8d0cd67198 dev: update configuration for the self hosted instance 2023-09-25 13:00:34 +05:30
pablohashescobar ad9ff684e9 Merge branch 'develop' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-09-25 12:29:59 +05:30
pablohashescobar 7e6e6531ad dev: self hosted private settings 2023-09-25 12:29:09 +05:30
pablohashescobar c76d1dc8db Merge branch 'develop' of github.com:makeplane/plane into dev/private_bucket_for_attachments 2023-09-22 13:22:14 +05:30
pablohashescobar 5a7b19ae78 dev: update configuration 2023-09-22 13:16:58 +05:30
pablohashescobar dd760b4f38 dev: add overwrite configuration and configuration for self hosted version 2023-09-22 12:57:58 +05:30
pablohashescobar edceef71b4 dev: new private bucket for storing attachments 2023-09-22 12:25:18 +05:30
14 changed files with 181 additions and 218 deletions

View File

@@ -16,7 +16,8 @@ AWS_ACCESS_KEY_ID="access-key"
AWS_SECRET_ACCESS_KEY="secret-key"
AWS_S3_ENDPOINT_URL="http://plane-minio:9000"
# Changing this requires a corresponding change in nginx.conf for uploads if using the minio setup
AWS_S3_BUCKET_NAME="uploads"
AWS_PUBLIC_STORAGE_BUCKET_NAME="uploads"
AWS_PRIVATE_STORAGE_BUCKET_NAME="uploads-private"
# Maximum file upload limit
FILE_SIZE_LIMIT=5242880

View File

@@ -106,14 +106,14 @@ def upload_to_s3(zip_file, workspace_id, token_id, slug):
)
s3.upload_fileobj(
zip_file,
settings.AWS_S3_BUCKET_NAME,
settings.AWS_PUBLIC_STORAGE_BUCKET_NAME,
file_name,
ExtraArgs={"ACL": "public-read", "ContentType": "application/zip"},
)
presigned_url = s3.generate_presigned_url(
"get_object",
Params={"Bucket": settings.AWS_S3_BUCKET_NAME, "Key": file_name},
Params={"Bucket": settings.AWS_PUBLIC_STORAGE_BUCKET_NAME, "Key": file_name},
ExpiresIn=expires_in,
)
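
A side note on this hunk: the export object is uploaded with a public-read ACL, so on AWS S3 the presigned URL mainly adds a canonical expiry; the bare object URL would resolve as well. A one-line sketch with placeholder names (plain_url is illustrative, not from this change):

plain_url = f"https://{settings.AWS_PUBLIC_STORAGE_BUCKET_NAME}.s3.amazonaws.com/{file_name}"  # unsigned, fetchable because the ACL is public-read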

View File

@@ -44,6 +44,6 @@ def delete_old_s3_link():
if settings.DOCKERIZED and settings.USE_MINIO:
s3.delete_object(Bucket=settings.AWS_STORAGE_BUCKET_NAME, Key=file_name)
else:
s3.delete_object(Bucket=settings.AWS_S3_BUCKET_NAME, Key=file_name)
s3.delete_object(Bucket=settings.AWS_PUBLIC_STORAGE_BUCKET_NAME, Key=file_name)
ExporterHistory.objects.filter(id=exporter_id).update(url=None)

View File

@@ -0,0 +1,38 @@
# Generated by Django 4.2.3 on 2023-09-21 14:16
import boto3
from django.db import migrations
from django.conf import settings
def move_s3_objects(apps, schema_editor):
IssueAttachment = apps.get_model("db", "IssueAttachment")
# Your source and destination bucket names
source_bucket = settings.AWS_PUBLIC_STORAGE_BUCKET_NAME
destination_bucket = settings.AWS_PRIVATE_STORAGE_BUCKET_NAME
s3_client = boto3.client(
"s3",
aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
)
for key in IssueAttachment.objects.values_list("asset", flat=True):
try:
copy_source = {"Bucket": source_bucket, "Key": key}
s3_client.copy_object(
Bucket=destination_bucket, CopySource=copy_source, Key=key
)
except Exception as e:
pass
class Migration(migrations.Migration):
dependencies = [
("db", "0045_issueactivity_epoch_workspacemember_issue_props_and_more"),
]
operations = [
migrations.RunPython(move_s3_objects),
]
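
A note on the data migration above: RunPython is registered without a reverse function, so this migration cannot be unapplied. A minimal sketch (assuming the copied objects are safe to leave in place on rollback) of a reversible registration, not part of this change:

operations = [
    # Forward: copy attachments to the private bucket; reverse: no-op, so
    # `migrate db 0045` can step back without raising IrreversibleError.
    migrations.RunPython(move_s3_objects, migrations.RunPython.noop),
]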

View File

@@ -8,7 +8,7 @@ from django.conf import settings
# Module import
from . import BaseModel
from plane.settings.storage import PublicS3Storage
def get_upload_path(instance, filename):
if instance.workspace_id is not None:
@@ -32,6 +32,7 @@ class FileAsset(BaseModel):
validators=[
file_size,
],
storage=PublicS3Storage(),
)
workspace = models.ForeignKey(
"db.Workspace", on_delete=models.CASCADE, null=True, related_name="assets"

View File

@@ -14,7 +14,7 @@ from django.core.exceptions import ValidationError
# Module imports
from . import ProjectBaseModel
from plane.utils.html_processor import strip_tags
from plane.settings.storage import PrivateS3Storage
# TODO: Handle identifiers for Bulk Inserts - nk
class IssueManager(models.Manager):
@@ -267,6 +267,7 @@ class IssueAttachment(ProjectBaseModel):
validators=[
file_size,
],
storage=PrivateS3Storage(),
)
issue = models.ForeignKey(
"db.Issue", on_delete=models.CASCADE, related_name="issue_attachment"

View File

@@ -36,6 +36,7 @@ INSTALLED_APPS = [
"corsheaders",
"taggit",
"django_celery_beat",
"storages",
]
MIDDLEWARE = [

View File

@@ -119,5 +119,29 @@ GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", False)
ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "1") == "1"
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
AWS_S3_FILE_OVERWRITE = False
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage Settings
# Unsplash Access key
UNSPLASH_ACCESS_KEY = os.environ.get("UNSPLASH_ACCESS_KEY")
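
PUBLIC_FILE_STORAGE and PRIVATE_FILE_STORAGE are dotted-path strings rather than classes; a sketch of how calling code could resolve them with Django 4.2's standard loader (an assumption about intended use, not code from this change):

from django.conf import settings
from django.core.files.storage import get_storage_class

# Resolve the configured dotted paths into storage classes, keeping settings
# the single source of truth for which backend serves each bucket.
PrivateStorage = get_storage_class(settings.PRIVATE_FILE_STORAGE)
storage = PrivateStorage()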

View File

@@ -90,113 +90,33 @@ if bool(os.environ.get("SENTRY_DSN", False)):
profiles_sample_rate=1.0,
)
if DOCKERIZED and USE_MINIO:
INSTALLED_APPS += ("storages",)
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# The AWS access key to use.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "access-key")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "secret-key")
# The name of the bucket to store files in.
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "uploads")
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get(
"AWS_S3_ENDPOINT_URL", "http://plane-minio:9000"
)
# Default permissions
AWS_DEFAULT_ACL = "public-read"
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = False
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
AWS_S3_FILE_OVERWRITE = False
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
"CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Custom Domain settings
parsed_url = urlparse(os.environ.get("WEB_URL", "http://localhost"))
AWS_S3_CUSTOM_DOMAIN = f"{parsed_url.netloc}/{AWS_STORAGE_BUCKET_NAME}"
AWS_S3_URL_PROTOCOL = f"{parsed_url.scheme}:"
else:
# The AWS region to connect to.
AWS_REGION = os.environ.get("AWS_REGION", "")
# The AWS access key to use.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
# The optional AWS session token to use.
# AWS_SESSION_TOKEN = ""
# The name of the bucket to store files in.
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME")
# How to construct S3 URLs ("auto", "path", "virtual").
AWS_S3_ADDRESSING_STYLE = "auto"
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "")
# A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator.
AWS_S3_KEY_PREFIX = ""
# Whether to enable authentication for stored files. If True, then generated URLs will include an authentication
# token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token,
# and their permissions will be set to "public-read".
AWS_S3_BUCKET_AUTH = False
# How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH`
# is True. It also affects the "Cache-Control" header of the files.
# Important: Changing this setting will not affect existing files.
AWS_S3_MAX_AGE_SECONDS = 60 * 60  # 1 hour.
# A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting
# cannot be used with `AWS_S3_BUCKET_AUTH`.
AWS_S3_PUBLIC_URL = ""
# If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you
# understand the consequences before enabling.
# Important: Changing this setting will not affect existing files.
AWS_S3_REDUCED_REDUNDANCY = False
# The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_DISPOSITION = ""
# The Content-Language header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_LANGUAGE = ""
# A mapping of custom metadata for each file. Each value can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_METADATA = {}
# If True, then files will be stored using AES256 server-side encryption.
# If this is a string value (e.g., "aws:kms"), that encryption type will be used.
# Otherwise, server-side encryption will not be enabled.
# Important: Changing this setting will not affect existing files.
AWS_S3_ENCRYPT_KEY = False
# The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present.
# This is only relevant if AWS S3 KMS server-side encryption is enabled (above).
# AWS_S3_KMS_ENCRYPTION_KEY_ID = ""
# If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their
# compressed size is smaller than their uncompressed size.
# Important: Changing this setting will not affect existing files.
AWS_S3_GZIP = True
# The signature version to use for S3 requests.
AWS_S3_SIGNATURE_VERSION = None
# If True, then files with the same name will overwrite each other. By default this is False, so
# extra characters are appended to avoid collisions.
AWS_S3_FILE_OVERWRITE = False
STORAGES["default"] = {
"BACKEND": "django_s3_storage.storage.S3Storage",
}
# AWS Settings End
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage Settings
# Enable Connection Pooling (if desired)
# DATABASES['default']['ENGINE'] = 'django_postgrespool'

View File

@@ -1,8 +1,5 @@
"""Self hosted settings and globals."""
from urllib.parse import urlparse
import dj_database_url
from urllib.parse import urlparse
from .common import * # noqa
@@ -55,33 +52,33 @@ CORS_ALLOW_HEADERS = [
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_ALL_ORIGINS = True
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
INSTALLED_APPS += ("storages",)
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# The AWS access key to use.
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "access-key")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "secret-key")
# The name of the bucket to store files in.
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "uploads")
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
AWS_S3_ENDPOINT_URL = os.environ.get(
"AWS_S3_ENDPOINT_URL", "http://plane-minio:9000"
)
# Default permissions
AWS_DEFAULT_ACL = "public-read"
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = False
AWS_DEFAULT_ACL = None
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
PUBLIC_FILE_STORAGE = "plane.settings.storage.PublicS3Storage"
# Custom Domain settings
parsed_url = urlparse(os.environ.get("WEB_URL", "http://localhost"))
AWS_S3_CUSTOM_DOMAIN = f"{parsed_url.netloc}/{AWS_STORAGE_BUCKET_NAME}"
AWS_S3_URL_PROTOCOL = f"{parsed_url.scheme}:"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "plane.settings.storage.PrivateS3Storage"
## End Storage settings
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")

View File

@@ -1,10 +1,8 @@
"""Production settings and globals."""
from urllib.parse import urlparse
import ssl
import certifi
import dj_database_url
from urllib.parse import urlparse
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
@@ -70,91 +68,34 @@ sentry_sdk.init(
profiles_sample_rate=1.0,
)
# The AWS region to connect to.
AWS_REGION = os.environ.get("AWS_REGION")
# The AWS access key to use.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
# The optional AWS session token to use.
# AWS_SESSION_TOKEN = ""
# The name of the bucket to store files in.
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME")
# How to construct S3 URLs ("auto", "path", "virtual").
AWS_S3_ADDRESSING_STYLE = "auto"
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "")
# A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator.
AWS_S3_KEY_PREFIX = ""
# Whether to enable authentication for stored files. If True, then generated URLs will include an authentication
# token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token,
# and their permissions will be set to "public-read".
AWS_S3_BUCKET_AUTH = False
# How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH`
# is True. It also affects the "Cache-Control" header of the files.
# Important: Changing this setting will not affect existing files.
AWS_S3_MAX_AGE_SECONDS = 60 * 60  # 1 hour.
# A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting
# cannot be used with `AWS_S3_BUCKET_AUTH`.
AWS_S3_PUBLIC_URL = ""
# If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you
# understand the consequences before enabling.
# Important: Changing this setting will not affect existing files.
AWS_S3_REDUCED_REDUNDANCY = False
# The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_DISPOSITION = ""
# The Content-Language header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_LANGUAGE = ""
# A mapping of custom metadata for each file. Each value can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_METADATA = {}
# If True, then files will be stored using AES256 server-side encryption.
# If this is a string value (e.g., "aws:kms"), that encryption type will be used.
# Otherwise, server-side encryption will not be enabled.
# Important: Changing this setting will not affect existing files.
AWS_S3_ENCRYPT_KEY = False
# The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present.
# This is only relevant if AWS S3 KMS server-side encryption is enabled (above).
# AWS_S3_KMS_ENCRYPTION_KEY_ID = ""
# If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their
# compressed size is smaller than their uncompressed size.
# Important: Changing this setting will not affect existing files.
AWS_S3_GZIP = True
# The signature version to use for S3 requests.
AWS_S3_SIGNATURE_VERSION = None
# If True, then files with the same name will overwrite each other. By default this is False, so
# extra characters are appended to avoid collisions.
AWS_S3_FILE_OVERWRITE = False
# AWS Settings End
STORAGES["default"] = {
"BACKEND": "django_s3_storage.storage.S3Storage",
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
AWS_S3_FILE_OVERWRITE = False
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
"CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage settings
# Enable Connection Pooling (if desired)
# DATABASES['default']['ENGINE'] = 'django_postgrespool'

View File

@@ -0,0 +1,27 @@
from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
from urllib.parse import urlparse
class PublicS3Storage(S3Boto3Storage):
"""Configuration for the Public bucket storage"""
bucket_name = settings.AWS_PUBLIC_STORAGE_BUCKET_NAME
default_acl = settings.AWS_PUBLIC_DEFAULT_ACL
querystring_auth = False
# For self hosted docker and minio
if settings.DOCKERIZED and settings.USE_MINIO:
custom_domain = f"{urlparse(settings.WEB_URL).netloc}/{bucket_name}"
url_protocol = f"{urlparse(settings.WEB_URL).scheme}:"
class PrivateS3Storage(S3Boto3Storage):
"""Configuration for the Private bucket storage"""
bucket_name = settings.AWS_PRIVATE_STORAGE_BUCKET_NAME
region_name = settings.AWS_REGION_NAME
addressing_style = settings.AWS_S3_ADDRESSING_STYLE
default_acl = settings.AWS_PRIVATE_DEFAULT_ACL
# For self hosted docker and minio
if settings.DOCKERIZED and settings.USE_MINIO:
custom_domain = f"{urlparse(settings.WEB_URL).netloc}/{bucket_name}"
url_protocol = f"{urlparse(settings.WEB_URL).scheme}:"
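
A usage sketch for the new private class (the key below is a placeholder): S3Boto3Storage.url() accepts an expire argument in seconds, which is the natural way to hand out short-lived links to private attachments.

from plane.settings.storage import PrivateS3Storage

storage = PrivateS3Storage()
# Presigned GET valid for 10 minutes; works because the private class keeps
# S3Boto3Storage's default querystring_auth = True.
signed_url = storage.url("<workspace_id>/<uuid>-report.pdf", expire=600)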

View File

@@ -111,7 +111,14 @@ services:
createbuckets:
image: minio/mc
entrypoint: >
/bin/sh -c " /usr/bin/mc config host add plane-minio http://plane-minio:9000 \$AWS_ACCESS_KEY_ID \$AWS_SECRET_ACCESS_KEY; /usr/bin/mc mb plane-minio/\$AWS_S3_BUCKET_NAME; /usr/bin/mc anonymous set download plane-minio/\$AWS_S3_BUCKET_NAME; exit 0; "
/bin/sh -c "
/usr/bin/mc config host add plane-minio http://plane-minio:9000 \$AWS_ACCESS_KEY_ID \$AWS_SECRET_ACCESS_KEY;
/usr/bin/mc mb plane-minio/\$AWS_PUBLIC_STORAGE_BUCKET_NAME;
/usr/bin/mc anonymous set download plane-minio/\$AWS_PUBLIC_STORAGE_BUCKET_NAME;
/usr/bin/mc config host add plane-minio http://plane-minio:9000 \$AWS_ACCESS_KEY_ID \$AWS_SECRET_ACCESS_KEY;
/usr/bin/mc mb plane-minio/\$AWS_PRIVATE_STORAGE_BUCKET_NAME;
/usr/bin/mc anonymous set none plane-minio/\$AWS_PRIVATE_STORAGE_BUCKET_NAME; exit 0;
"
env_file:
- .env
depends_on:
@@ -128,7 +135,8 @@ services:
- ${NGINX_PORT}:80
environment:
FILE_SIZE_LIMIT: ${FILE_SIZE_LIMIT:-5242880}
BUCKET_NAME: ${AWS_S3_BUCKET_NAME:-uploads}
PUBLIC_BUCKET_NAME: ${AWS_PUBLIC_STORAGE_BUCKET_NAME:-uploads}
PRIVATE_BUCKET_NAME: ${AWS_PRIVATE_STORAGE_BUCKET_NAME:-uploads-private}
depends_on:
- web
- api
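
One way to check the policies the createbuckets job applies (anonymous download on the public bucket, none on the private one) is an unsigned boto3 client; the endpoint and bucket names below assume the default compose values:

import boto3
from botocore import UNSIGNED
from botocore.config import Config

anon = boto3.client(
    "s3",
    endpoint_url="http://localhost:9000",
    config=Config(signature_version=UNSIGNED),  # send no credentials at all
)
anon.get_object(Bucket="uploads", Key="known-key")          # succeeds: download policy allows anonymous reads
anon.get_object(Bucket="uploads-private", Key="known-key")  # raises ClientError (AccessDenied)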

View File

@@ -29,8 +29,12 @@ http {
proxy_pass http://space:3000/spaces/;
}
location /${BUCKET_NAME}/ {
proxy_pass http://plane-minio:9000/uploads/;
location /${PUBLIC_BUCKET_NAME}/ {
proxy_pass http://plane-minio:9000/${PUBLIC_BUCKET_NAME}/;
}
location /${PRIVATE_BUCKET_NAME}/ {
proxy_pass http://plane-minio:9000/${PRIVATE_BUCKET_NAME}/;
}
}
}