dev: new private bucket for storing attachments

pablohashescobar 2023-09-22 12:25:18 +05:30
parent bd077e6500
commit edceef71b4
9 changed files with 171 additions and 184 deletions

View File

@@ -0,0 +1,39 @@
# Generated by Django 4.2.3 on 2023-09-21 14:16
import boto3
import botocore
from django.db import migrations
from django.conf import settings


def move_s3_objects(apps, schema_editor):
    IssueAttachment = apps.get_model("db", "IssueAttachment")

    # Source and destination bucket names
    source_bucket = settings.AWS_PUBLIC_STORAGE_BUCKET_NAME
    destination_bucket = settings.AWS_PRIVATE_STORAGE_BUCKET_NAME

    s3_client = boto3.client(
        "s3",
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )

    # Copy every attachment key from the public bucket into the private one
    for key in IssueAttachment.objects.values_list("asset", flat=True):
        try:
            copy_source = {"Bucket": source_bucket, "Key": key}
            s3_client.copy_object(
                Bucket=destination_bucket, CopySource=copy_source, Key=key
            )
        except botocore.exceptions.ClientError:
            # Skip keys that cannot be copied (e.g. the source object is gone)
            pass


class Migration(migrations.Migration):
    dependencies = [
        ("db", "0046_auto_20230919_1421"),
    ]

    operations = [
        migrations.RunPython(move_s3_objects),
    ]
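Since the copy loop skips failures rather than aborting, it can be worth spot-checking the private bucket once the migration has run. A minimal sketch under the same settings (the verify_private_copies helper is illustrative, not part of this commit):

import boto3
from django.conf import settings

def verify_private_copies(keys):
    """Return the subset of keys missing from the private bucket (sketch)."""
    s3 = boto3.client(
        "s3",
        aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
        aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
    )
    missing = []
    for key in keys:
        try:
            # head_object raises ClientError with a 404 code if the key is absent
            s3.head_object(
                Bucket=settings.AWS_PRIVATE_STORAGE_BUCKET_NAME, Key=key
            )
        except s3.exceptions.ClientError:
            missing.append(key)
    return missing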

View File

@@ -8,7 +8,7 @@ from django.conf import settings
# Module import
from . import BaseModel
from plane.settings.storage import PublicS3Storage
def get_upload_path(instance, filename):
    if instance.workspace_id is not None:
@@ -32,6 +32,7 @@ class FileAsset(BaseModel):
        validators=[
            file_size,
        ],
        storage=PublicS3Storage,
    )
    workspace = models.ForeignKey(
        "db.Workspace", on_delete=models.CASCADE, null=True, related_name="assets"

View File

@@ -14,7 +14,7 @@ from django.core.exceptions import ValidationError
# Module imports
from . import ProjectBaseModel
from plane.utils.html_processor import strip_tags
from plane.settings.storage import PrivateS3Storage
# TODO: Handle identifiers for Bulk Inserts - nk
class IssueManager(models.Manager):
@@ -267,6 +267,7 @@ class IssueAttachment(ProjectBaseModel):
        validators=[
            file_size,
        ],
        storage=PrivateS3Storage,
    )
    issue = models.ForeignKey(
        "db.Issue", on_delete=models.CASCADE, related_name="issue_attachment"

View File

@@ -36,6 +36,7 @@ INSTALLED_APPS = [
    "corsheaders",
    "taggit",
    "django_celery_beat",
    "storages",
]
MIDDLEWARE = [

View File

@@ -114,3 +114,31 @@ CELERY_BROKER_URL = os.environ.get("REDIS_URL")
GITHUB_ACCESS_TOKEN = os.environ.get("GITHUB_ACCESS_TOKEN", False)
ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "1") == "1"
# Storage Settings
STORAGES = {
    "staticfiles": {
        "BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
    },
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage Settings
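PUBLIC_FILE_STORAGE and PRIVATE_FILE_STORAGE hold dotted paths rather than classes, so any consumer would resolve them at runtime. A minimal sketch of that pattern, relying on django-storages' documented behaviour that S3Boto3Storage accepts setting overrides as keyword arguments (this helper code is illustrative, not part of the commit):

from django.conf import settings
from django.utils.module_loading import import_string

# Resolve the dotted path into the storage class, then override per bucket
storage_cls = import_string(settings.PRIVATE_FILE_STORAGE)
private_storage = storage_cls(
    bucket_name=settings.AWS_PRIVATE_STORAGE_BUCKET_NAME,
    default_acl=settings.AWS_PRIVATE_DEFAULT_ACL,
)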

View File

@@ -89,91 +89,33 @@ if bool(os.environ.get("SENTRY_DSN", False)):
        profiles_sample_rate=1.0,
    )
# The AWS region to connect to.
AWS_REGION = os.environ.get("AWS_REGION", "")
# The AWS access key to use.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "")
# The optional AWS session token to use.
# AWS_SESSION_TOKEN = ""
# The name of the bucket to store files in.
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME")
# How to construct S3 URLs ("auto", "path", "virtual").
AWS_S3_ADDRESSING_STYLE = "auto"
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "")
# A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator.
AWS_S3_KEY_PREFIX = ""
# Whether to enable authentication for stored files. If True, then generated URLs will include an authentication
# token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token,
# and their permissions will be set to "public-read".
AWS_S3_BUCKET_AUTH = False
# How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH`
# is True. It also affects the "Cache-Control" header of the files.
# Important: Changing this setting will not affect existing files.
AWS_S3_MAX_AGE_SECONDS = 60 * 60  # 1 hour.
# A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting
# cannot be used with `AWS_S3_BUCKET_AUTH`.
AWS_S3_PUBLIC_URL = ""
# If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you
# understand the consequences before enabling.
# Important: Changing this setting will not affect existing files.
AWS_S3_REDUCED_REDUNDANCY = False
# The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_DISPOSITION = ""
# The Content-Language header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_LANGUAGE = ""
# A mapping of custom metadata for each file. Each value can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_METADATA = {}
# If True, then files will be stored using AES256 server-side encryption.
# If this is a string value (e.g., "aws:kms"), that encryption type will be used.
# Otherwise, server-side encryption is not enabled.
# Important: Changing this setting will not affect existing files.
AWS_S3_ENCRYPT_KEY = False
# The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present.
# This is only relevant if AWS S3 KMS server-side encryption is enabled (above).
# AWS_S3_KMS_ENCRYPTION_KEY_ID = ""
# If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their
# compressed size is smaller than their uncompressed size.
# Important: Changing this setting will not affect existing files.
AWS_S3_GZIP = True
# The signature version to use for S3 requests.
AWS_S3_SIGNATURE_VERSION = None
# If True, then files with the same name will overwrite each other. The default is False, which appends
# extra characters to new filenames to keep them unique.
AWS_S3_FILE_OVERWRITE = False
STORAGES["default"] = {
"BACKEND": "django_s3_storage.storage.S3Storage",
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
# AWS Settings End
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage Settings
# Enable Connection Pooling (if desired)
# DATABASES['default']['ENGINE'] = 'django_postgrespool'
@@ -237,4 +179,3 @@ ENABLE_SIGNUP = os.environ.get("ENABLE_SIGNUP", "1") == "1"
SCOUT_MONITOR = os.environ.get("SCOUT_MONITOR", False)
SCOUT_KEY = os.environ.get("SCOUT_KEY", "")
SCOUT_NAME = "Plane"

View File

@@ -55,33 +55,33 @@ CORS_ALLOW_HEADERS = [
CORS_ALLOW_CREDENTIALS = True
CORS_ALLOW_ALL_ORIGINS = True
# Storage Settings
STORAGES = {
    "staticfiles": {
        "BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
    },
}
INSTALLED_APPS += ("storages",)
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# The AWS access key to use.
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", "access-key")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", "secret-key")
# The name of the bucket to store files in.
AWS_STORAGE_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME", "uploads")
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get(
    "AWS_S3_ENDPOINT_URL", "http://plane-minio:9000"
)
# Default permissions
AWS_DEFAULT_ACL = "public-read"
AWS_QUERYSTRING_AUTH = False
AWS_S3_FILE_OVERWRITE = False
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Custom Domain settings
parsed_url = urlparse(os.environ.get("WEB_URL", "http://localhost"))
AWS_S3_CUSTOM_DOMAIN = f"{parsed_url.netloc}/{AWS_STORAGE_BUCKET_NAME}"
AWS_S3_URL_PROTOCOL = f"{parsed_url.scheme}:"
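A worked example of what the two lines above produce, assuming WEB_URL=https://plane.example.com and the default "uploads" bucket (values illustrative):

from urllib.parse import urlparse

parsed_url = urlparse("https://plane.example.com")
custom_domain = f"{parsed_url.netloc}/uploads"  # "plane.example.com/uploads"
url_protocol = f"{parsed_url.scheme}:"          # "https:"
# A stored key like "attachments/logo.png" then resolves to
# https://plane.example.com/uploads/attachments/logo.png via the reverse proxy.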
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage settings
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")

View File

@@ -70,91 +70,34 @@ sentry_sdk.init(
    profiles_sample_rate=1.0,
)
# The AWS region to connect to.
AWS_REGION = os.environ.get("AWS_REGION")
# The AWS access key to use.
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
# The AWS secret access key to use.
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
# The optional AWS session token to use.
# AWS_SESSION_TOKEN = ""
# The name of the bucket to store files in.
AWS_S3_BUCKET_NAME = os.environ.get("AWS_S3_BUCKET_NAME")
# How to construct S3 URLs ("auto", "path", "virtual").
AWS_S3_ADDRESSING_STYLE = "auto"
# The full URL to the S3 endpoint. Leave blank to use the default region URL.
AWS_S3_ENDPOINT_URL = os.environ.get("AWS_S3_ENDPOINT_URL", "")
# A prefix to be applied to every stored file. This will be joined to every filename using the "/" separator.
AWS_S3_KEY_PREFIX = ""
# Whether to enable authentication for stored files. If True, then generated URLs will include an authentication
# token valid for `AWS_S3_MAX_AGE_SECONDS`. If False, then generated URLs will not include an authentication token,
# and their permissions will be set to "public-read".
AWS_S3_BUCKET_AUTH = False
# How long generated URLs are valid for. This affects the expiry of authentication tokens if `AWS_S3_BUCKET_AUTH`
# is True. It also affects the "Cache-Control" header of the files.
# Important: Changing this setting will not affect existing files.
AWS_S3_MAX_AGE_SECONDS = 60 * 60  # 1 hour.
# A URL prefix to be used for generated URLs. This is useful if your bucket is served through a CDN. This setting
# cannot be used with `AWS_S3_BUCKET_AUTH`.
AWS_S3_PUBLIC_URL = ""
# If True, then files will be stored with reduced redundancy. Check the S3 documentation and make sure you
# understand the consequences before enabling.
# Important: Changing this setting will not affect existing files.
AWS_S3_REDUCED_REDUNDANCY = False
# The Content-Disposition header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_DISPOSITION = ""
# The Content-Language header used when the file is downloaded. This can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_CONTENT_LANGUAGE = ""
# A mapping of custom metadata for each file. Each value can be a string, or a function taking a
# single `name` argument.
# Important: Changing this setting will not affect existing files.
AWS_S3_METADATA = {}
# If True, then files will be stored using AES256 server-side encryption.
# If this is a string value (e.g., "aws:kms"), that encryption type will be used.
# Otherwise, server-side encryption is not enabled.
# Important: Changing this setting will not affect existing files.
AWS_S3_ENCRYPT_KEY = False
# The AWS S3 KMS encryption key ID (the `SSEKMSKeyId` parameter) is set from this string if present.
# This is only relevant if AWS S3 KMS server-side encryption is enabled (above).
# AWS_S3_KMS_ENCRYPTION_KEY_ID = ""
# If True, then text files will be stored using gzip content encoding. Files will only be gzipped if their
# compressed size is smaller than their uncompressed size.
# Important: Changing this setting will not affect existing files.
AWS_S3_GZIP = True
# The signature version to use for S3 requests.
AWS_S3_SIGNATURE_VERSION = None
# If True, then files with the same name will overwrite each other. The default is False, which appends
# extra characters to new filenames to keep them unique.
AWS_S3_FILE_OVERWRITE = False
# AWS Settings End
STORAGES["default"] = {
"BACKEND": "django_s3_storage.storage.S3Storage",
# Storage Settings
STORAGES = {
"staticfiles": {
"BACKEND": "whitenoise.storage.CompressedManifestStaticFilesStorage",
},
}
STORAGES["default"] = {"BACKEND": "storages.backends.s3boto3.S3Boto3Storage"}
# Common AWS settings
AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY")
AWS_REGION_NAME = os.environ.get("AWS_REGION")
AWS_S3_ADDRESSING_STYLE = os.environ.get("AWS_S3_ADDRESSING_STYLE")
# Public S3 bucket settings
AWS_PUBLIC_STORAGE_BUCKET_NAME = os.environ.get("AWS_PUBLIC_STORAGE_BUCKET_NAME")
AWS_PUBLIC_DEFAULT_ACL = "public-read"
AWS_S3_PUBLIC_OBJECT_PARAMETERS = {
    "CacheControl": "max-age=86400",
}
PUBLIC_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# Private S3 bucket settings
AWS_PRIVATE_STORAGE_BUCKET_NAME = os.environ.get("AWS_PRIVATE_STORAGE_BUCKET_NAME")
AWS_S3_PRIVATE_FILE_OVERWRITE = False
AWS_PRIVATE_DEFAULT_ACL = "private"
PRIVATE_FILE_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# End Storage settings
# Enable Connection Pooling (if desired)
# DATABASES['default']['ENGINE'] = 'django_postgrespool'

View File

@@ -0,0 +1,33 @@
from django.conf import settings
from storages.backends.s3boto3 import S3Boto3Storage
from urllib.parse import urlparse


class PublicS3Storage(S3Boto3Storage):
    """Configuration for the public bucket storage"""

    bucket_name = settings.AWS_PUBLIC_STORAGE_BUCKET_NAME
    default_acl = settings.AWS_PUBLIC_DEFAULT_ACL
    object_parameters = settings.AWS_S3_PUBLIC_OBJECT_PARAMETERS
    querystring_auth = False

    # For self-hosted Docker and MinIO, serve objects through the web domain
    if settings.DOCKERIZED and settings.USE_MINIO:
        parsed_url = urlparse(settings.WEB_URL)
        custom_domain = f"{parsed_url.netloc}/{bucket_name}"
        url_protocol = f"{parsed_url.scheme}:"


class PrivateS3Storage(S3Boto3Storage):
    """Configuration for the private bucket storage"""

    bucket_name = settings.AWS_PRIVATE_STORAGE_BUCKET_NAME
    default_acl = settings.AWS_PRIVATE_DEFAULT_ACL
    file_overwrite = settings.AWS_S3_PRIVATE_FILE_OVERWRITE
    region_name = settings.AWS_REGION_NAME
    addressing_style = settings.AWS_S3_ADDRESSING_STYLE

    # For self-hosted Docker and MinIO, serve objects through the web domain
    if settings.DOCKERIZED and settings.USE_MINIO:
        parsed_url = urlparse(settings.WEB_URL)
        custom_domain = f"{parsed_url.netloc}/{bucket_name}"
        url_protocol = f"{parsed_url.scheme}:"
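PrivateS3Storage leaves querystring_auth at django-storages' default of True, so URLs generated for private objects are presigned and expire after querystring_expire seconds (3600 by default). If a shorter lifetime were wanted, a small subclass could pin it; a sketch, with the five-minute value as an illustrative choice rather than anything from this commit:

class ShortLivedPrivateS3Storage(PrivateS3Storage):
    """Private storage whose presigned URLs expire quickly (sketch)."""

    querystring_auth = True   # sign generated URLs (the default, made explicit)
    querystring_expire = 300  # seconds a signed URL stays valid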