Compare commits

...

10 Commits

  1. .dockerignore (8 changes)
  2. .gitignore (1 change)
  3. .python-version (1 change)
  4. Dockerfile (67 changes)
  5. acm-config-default.json (242 changes)
  6. acm.py (211 changes)
  7. acm/__init__.py (0 changes)
  8. acm/asyncio.py (81 changes)
  9. acm/config.py (311 changes)
  10. acm/logging.py (46 changes)
  11. acm/s3.py (3 changes)
  12. acm/utility.py (39 changes)
  13. acm/version.py (31 changes)
  14. docker/entrypoint.sh (10 changes)
  15. poetry.lock (141 changes)
  16. pyproject.toml (19 changes)
  17. requirements.txt (2 changes)
  18. scripts/build_container.sh (9 changes)
  19. scripts/upload_pipeline.sh (0 changes, renamed)
  20. setup.py (2 changes)

.dockerignore (8 changes)

@@ -0,0 +1,8 @@
.git/
.idea/
scripts/
venv/
.gitignore
Dockerfile
pipeline.yml

.gitignore (1 change)

@@ -1,5 +1,4 @@
.idea/
venv/
l_venv/

.python-version (1 change)

@@ -0,0 +1 @@
3.9.5

Dockerfile (67 changes)

@@ -2,61 +2,78 @@ FROM ubuntu:20.04
LABEL maintainer="Drew Short <warrick@sothr.com>" \
name="acm" \
version="1.4.0" \
version="2.0.0" \
description="Prepackaged ACM with defaults and tooling"
ENV LC_ALL=C.UTF-8
ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive
ARG MOZJPEG_VERSION="3.3.1"
# Install tooling (ffmpeg, opusenc, optipng, python3)
# https://packages.ubuntu.com/search?suite=focal&section=all&arch=any&keywords=curl&searchon=names
RUN ln -fs /usr/share/zoneinfo/UCT /etc/localtime \
&& apt-get update -y \
&& apt-get install -y --fix-missing \
curl \
ffmpeg \
opus-tools \
optipng \
python3 \
python3-pip \
tzdata \
webp
&& apt-get install -f -y --no-install-recommends \
build-essential=12.8ubuntu1 \
curl=7.68.0-1ubuntu2.7 \
ffmpeg=7:4.2.4-1ubuntu0.1 \
opus-tools=0.1.10-1 \
optipng=0.7.7-1 \
python-pip-whl=20.0.2-5ubuntu1.5 \
python3=3.8.2-0ubuntu2 \
python3-pip=20.0.2-5ubuntu1.5 \
python3-distutils=3.8.10-0ubuntu1~20.04 \
python3-apt=2.0.0ubuntu0.20.04.3 \
tzdata=2021e-0ubuntu0.20.04 \
webp=0.6.1-2ubuntu0.20.04.1 \
&& apt-get upgrade -y
WORKDIR /tmp
# Install mozjpeg
RUN curl -LS -o mozjpeg.deb "https://nexus.nulloctet.com/repository/public/mozjpeg/mozjpeg_${MOZJPEG_VERSION}_amd64.deb" \
ARG MOZJPEG_VERSION="3.3.1"
ARG MOZJPEG_ARCH="amd64"
RUN curl -LS -o mozjpeg.deb "https://nexus.nulloctet.com/repository/public/mozjpeg/mozjpeg_${MOZJPEG_VERSION}_${MOZJPEG_ARCH}.deb" \
&& dpkg -i mozjpeg.deb \
&& ln -sf /opt/mozjpeg/bin/cjpeg /bin/cjpeg \
&& rm -f mozjpeg.deb
# Cleanup image programs and cache
RUN apt-get remove -y curl \
&& rm -rf /var/lib/apt/lists/*
# Install poetry
ARG POETRY_VERSION="1.1.12"
RUN curl -sSL "https://raw.githubusercontent.com/python-poetry/poetry/${POETRY_VERSION}/get-poetry.py" | python3 -
ENV PATH="${PATH}:/root/.poetry/bin"
RUN poetry config virtualenvs.create false
WORKDIR /app
# Copy application
COPY acm-config-default.json acm.py requirements.txt /app/
# Install application requirements
RUN python3 -m pip install -r requirements.txt
COPY pyproject.toml poetry.lock /app/
RUN poetry install
# Cleanup image programs and cache
RUN apt-get remove -y \
build-essential \
curl \
python3-pip \
&& rm -rf /var/lib/apt/lists/* \
&& rm -rf /tmp/*
# Copy application
COPY . /app/
WORKDIR /bin
# Copy application helper script
COPY docker/acm acm
RUN mv /app/docker/acm acm
# Make script executable
RUN chmod +x acm
VOLUME ["/input", "/output"]
WORKDIR /app
COPY acm-config.json.example acm-config.json
COPY docker/entrypoint.sh .
RUN mv docker/* . \
&& chmod +x entrypoint.sh \
&& rm -rf docker \
&& mv acm-config.json.example acm-config.json
CMD ["sh", "-c", "find /input/ -type f | /app/entrypoint.sh --stdin --remove-prefix /input/ compress -p default -d /output/"]

acm-config-default.json (242 changes)

@@ -1,242 +0,0 @@
{
"concurrency": 0,
"profiles": {
"default": {
"jpeg": {
"version": "1.4.0",
"processors": [
"cjpeg"
],
"extensions": [
"jpg",
"jpeg"
],
"outputExtension": "jpg",
"forcePreserveSmallerInput": true,
"command": "cjpeg -optimize -quality 90 -progressive -outfile {output_file} {input_file}"
},
"png": {
"version": "1.4.0",
"processors": [
"optipng"
],
"extensions": [
"png"
],
"outputExtension": "png",
"forcePreserveSmallerInput": true,
"command": "optipng -o2 -strip all -out {output_file} {input_file}"
},
"video": {
"version": "1.4.0",
"processors": [
"ffmpeg"
],
"extensions": [
"mp4",
"webm"
],
"outputExtension": "webm",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -c:v libvpx-vp9 -b:v 0 -crf 29 -c:a libopus {output_file}"
},
"audio": {
"version": "1.4.0",
"processors": [
"ffmpeg",
"opusenc"
],
"extensions": [
"wav",
"mp3"
],
"outputExtension": "ogg",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -f wav -| opusenc --bitrate 64 --vbr --downmix-stereo --discard-comments --discard-pictures - {output_file}"
}
},
"placebo": {
"jpeg": {
"version": "1.4.0",
"processors": [
"cp"
],
"extensions": [
"jpg",
"jpeg"
],
"outputExtension": "jpg",
"preserveInputExtension": true,
"preserveSmallerInput": false,
"command": "cp {input_file} {output_file}"
},
"png": {
"version": "1.4.0",
"processors": [
"cp"
],
"extensions": [
"png"
],
"outputExtension": "png",
"preserveInputExtension": true,
"preserveSmallerInput": false,
"command": "cp {input_file} {output_file}"
},
"video": {
"version": "1.4.0",
"processors": [
"cp"
],
"extensions": [
"mp4",
"webm"
],
"outputExtension": "mp4",
"preserveInputExtension": true,
"preserveSmallerInput": false,
"command": "cp {input_file} {output_file}"
},
"audio": {
"version": "1.4.0",
"processors": [
"cp"
],
"extensions": [
"wav",
"mp3"
],
"outputExtension": "ogg",
"preserveInputExtension": true,
"preserveSmallerInput": false,
"command": "cp {input_file} {output_file}"
}
},
"webp": {
"jpeg": {
"version": "1.4.0",
"processors": [
"cwebp"
],
"extensions": [
"jpg",
"jpeg"
],
"outputExtension": "webp",
"command": "cwebp -jpeg_like -q 90 -o {output_file} {input_file}"
},
"png": {
"version": "1.4.0",
"processors": [
"cwebp"
],
"extensions": [
"png"
],
"outputExtension": "webp",
"command": "cwebp -lossless -o {output_file} {input_file}"
}
},
"aggressive": {
"jpeg": {
"version": "1.4.0",
"processors": [
"ffmpeg",
"cjpeg"
],
"extensions": [
"jpg",
"jpeg"
],
"outputExtension": "jpg",
"forcePreserveSmallerInput": true,
"command": "export FILE={output_file} && export TEMP_FILE=${FILE}_tmp.jpg && ffmpeg -i {input_file} -vf scale=-1:720 ${TEMP_FILE} && cjpeg -optimize -quality 75 -progressive -outfile {output_file} ${TEMP_FILE} && rm ${TEMP_FILE}"
},
"png": {
"version": "1.4.0",
"processors": [
"optipng"
],
"extensions": [
"png"
],
"outputExtension": "png",
"forcePreserveSmallerInput": true,
"command": "optipng -o2 -strip all -out {output_file} {input_file}"
},
"video": {
"version": "1.4.0",
"processors": [
"ffmpeg"
],
"extensions": [
"mp4",
"webm"
],
"outputExtension": "webm",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -vf scale=-1:720 -c:v libvpx-vp9 -b:v 0 -crf 38 -c:a libopus {output_file}"
},
"audio": {
"version": "1.4.0",
"processors": [
"ffmpeg",
"opusenc"
],
"extensions": [
"wav",
"mp3"
],
"outputExtension": "ogg",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -f wav -| opusenc --bitrate 64 --vbr --downmix-stereo --discard-comments --discard-pictures - {output_file}"
}
},
"aggressive-webp": {
"jpeg": {
"version": "1.4.0",
"processors": [
"cwebp"
],
"extensions": [
"jpg",
"jpeg"
],
"outputExtension": "webp",
"command": "export FILE={output_file} && export TEMP_FILE=${FILE}_tmp.jpg && ffmpeg -i {input_file} -vf scale=-1:720 ${TEMP_FILE} && cwebp -jpeg_like -q 75 -o {output_file} ${TEMP_FILE} && rm ${TEMP_FILE}"
},
"png": {
"version": "1.4.0",
"processors": [
"cwebp"
],
"extensions": [
"png"
],
"outputExtension": "webp",
"command": "cwebp -o {output_file} ${input_file}"
},
"video": {
"version": "1.4.0",
"processors": [
"ffmpeg"
],
"extensions": [
"mp4",
"webm"
],
"outputExtension": "webm",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -vf scale=-1:720 -c:v libvpx-vp9 -b:v 0 -crf 38 -c:a libopus {output_file}"
},
"audio": {
"version": "1.4.0",
"processors": [
"ffmpeg",
"opusenc"
],
"extensions": [
"wav",
"mp3"
],
"outputExtension": "ogg",
"command": "ffmpeg -hide_banner -loglevel panic -i {input_file} -f wav -| opusenc --bitrate 64 --vbr --downmix-stereo --discard-comments --discard-pictures - {output_file}"
}
}
}
}

acm.py (211 changes)

@@ -1,9 +1,10 @@
#!/usr/bin/env python
#!/usr/bin/env python3
import asyncio
import collections.abc
import hashlib
import io
import json
import logging
import os
import pathlib
import platform
@@ -12,103 +13,22 @@ import tempfile
from typing import List, Dict, Callable
import click
from minio import Minio, ResponseError
from minio.error import NoSuchKey
from minio import Minio
from minio.error import InvalidResponseError, S3Error
# Size of the buffer to read files with
BUF_SIZE = 4096
from acm.asyncio import make_chunks, run_asyncio_commands, run_command_shell
from acm.config import get_default_config
from acm.logging import setup_basic_logging, update_logging_level
from acm.utility import get_file_sha256sum, get_string_sha256sum
from acm.version import VERSION
# Application Version
VERSION = "1.4.0"
###########
# AsyncIO #
###########
async def run_command_shell(
command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
on_success: List[Callable] = [()]
):
"""Run command in subprocess (shell).
Note:
This can be used if you wish to execute e.g. "copy"
on Windows, which can only be executed in the shell.
"""
process = await asyncio.create_subprocess_shell(
command, stdout=stdout, stderr=stderr
)
process_stdout, process_stderr = await process.communicate()
if process.returncode == 0:
for success_callable in on_success:
success_callable()
if stdout != asyncio.subprocess.DEVNULL:
result = process_stdout.decode().strip()
return result
else:
return None
def make_chunks(tasks, chunk_size):
"""Yield successive chunk_size-sized chunks from tasks.
Note:
Taken from https://stackoverflow.com/a/312464
modified for python 3 only
"""
for i in range(0, len(tasks), chunk_size):
yield tasks[i: i + chunk_size]
def run_asyncio_commands(tasks, max_concurrent_tasks=0):
"""Run tasks asynchronously using asyncio and return results.
If max_concurrent_tasks are set to 0, no limit is applied.
Note:
By default, Windows uses SelectorEventLoop, which does not support
subprocesses. Therefore ProactorEventLoop is used on Windows.
https://docs.python.org/3/library/asyncio-eventloops.html#windows
"""
all_results = []
if max_concurrent_tasks == 0:
chunks = [tasks]
num_chunks = len(chunks)
else:
chunks = make_chunks(tasks=tasks, chunk_size=max_concurrent_tasks)
num_chunks = len(
list(make_chunks(tasks=tasks, chunk_size=max_concurrent_tasks)))
if asyncio.get_event_loop().is_closed():
asyncio.set_event_loop(asyncio.new_event_loop())
if platform.system() == "Windows":
asyncio.set_event_loop(asyncio.ProactorEventLoop())
loop = asyncio.get_event_loop()
chunk = 1
for tasks_in_chunk in chunks:
commands = asyncio.gather(*tasks_in_chunk)
results = loop.run_until_complete(commands)
all_results += results
chunk += 1
loop.close()
return all_results
LOG = setup_basic_logging("acm")
###########
# Helpers #
###########
def update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
@@ -174,25 +94,13 @@ def prep_s3(ctx):
return s3_bucket, s3
def get_file_sha256sum(stored_data, profile, file):
def get_stored_and_computed_sha256sums(stored_data, profile, file):
stored_file_hash = stored_data['sha256sum']
stored_profile_hash = stored_data['profileHash']
sha256sum = hashlib.sha256()
with open(file, 'rb') as f:
for byte_block in iter(lambda: f.read(BUF_SIZE), b""):
sha256sum.update(byte_block)
calculated_file_hash = sha256sum.hexdigest()
calculated_file_hash = get_file_sha256sum(file)
return stored_profile_hash, stored_file_hash, calculated_file_hash
def get_string_sha256sum(string: str, encoding='utf-8') -> str:
sha256sum = hashlib.sha256()
with io.BytesIO(json.dumps(string).encode(encoding)) as c:
for byte_block in iter(lambda: c.read(BUF_SIZE), b''):
sha256sum.update(byte_block)
return sha256sum.hexdigest()
def add_nested_key(config: Dict[str, any], path: List[str], value: str) -> bool:
target = path[0].lower()
if len(path) == 1:
@@ -263,12 +171,20 @@ def load_config(path: str) -> any:
@click.pass_context
def cli(ctx, debug, config, stdin, remove_prefix, add_prefix):
ctx.ensure_object(dict)
# Propagate the global configs
ctx.obj['DEBUG'] = debug
ctx.obj['CONFIG'] = load_config(config)
# ctx.obj['CONFIG'] = load_config(config)
ctx.obj['READ_STDIN'] = stdin
ctx.obj['REMOVE_PREFIX'] = remove_prefix
ctx.obj['ADD_PREFIX'] = add_prefix
if debug:
update_logging_level(3, LOG)
# Reduce the logging noise for library loggers
update_logging_level(0, "asyncio")
####################
# Generic Commands #
@@ -284,6 +200,23 @@ def print_config(ctx):
print(json.dumps(ctx.obj['CONFIG'], indent=2, sort_keys=True))
@cli.command(name="default-config")
@click.argument('profile', default="all")
@click.pass_context
def print_default_config(ctx, profile):
"""
Print the default configuration, optionally narrowed to a single profile
"""
if profile == "all":
print(get_default_config().json(exclude_none=True, indent=2, sort_keys=True))
else:
config = get_default_config()
profile_names = config.get_profile_names()
if profile in profile_names:
print(config.get_profile(profile).json(exclude_none=True, indent=2, sort_keys=True))
else:
print(f"Profile \"{profile}\" is not in {profile_names}")
###############################
# S3 Storage Focused Commands #
###############################
@@ -303,6 +236,7 @@ def list_files(ctx, context, sha256sum, suffix, print_identity):
s3_config = ctx.obj['CONFIG']['s3']
s3_bucket = ctx.obj['CONTEXT']
LOG.debug(f"connecting to s3 {s3_config['']}")
s3 = get_s3_client(s3_config)
if not s3.bucket_exists(s3_bucket):
@@ -364,7 +298,7 @@ def check_matched_files_hashes(ctx, context, print_identity, profile, files):
try:
file_object = s3.get_object(s3_bucket, file_identity)
stored_data = json.load(file_object)
stored_profile_hash, stored_file_hash, calculated_file_hash = get_file_sha256sum(
stored_profile_hash, stored_file_hash, calculated_file_hash = get_stored_and_computed_sha256sums(
stored_data, profile, file)
if calculated_file_hash == stored_file_hash \
and ctx.obj['CONFIG']['profileHashes'][profile] == stored_profile_hash:
@@ -372,10 +306,13 @@ def check_matched_files_hashes(ctx, context, print_identity, profile, files):
matching_files.append(stored_data['storedAssetIdentity'])
else:
matching_files.append(file)
except NoSuchKey as e:
except S3Error as e:
if e.code == "NoSuchKey":
continue
except ValueError or ResponseError as e:
print(f'ERROR: {file} {e}')
else:
LOG.error(e)
except (ValueError, InvalidResponseError) as e:
LOG.error(f'ERROR: {file} {e}')
print(os.linesep.join(matching_files))
@@ -401,15 +338,18 @@ def check_changed_files_hashes(ctx, context, profile, files):
try:
file_object = s3.get_object(s3_bucket, file_identity)
stored_data = json.load(file_object)
stored_profile_hash, stored_file_hash, calculated_file_hash = get_file_sha256sum(
stored_profile_hash, stored_file_hash, calculated_file_hash = get_stored_and_computed_sha256sums(
stored_data, profile, file)
if calculated_file_hash != stored_file_hash \
or ctx.obj['CONFIG']['profileHashes'][profile] != stored_profile_hash:
changed_files.append(file)
except NoSuchKey as e:
except S3Error as e:
if e.code == "NoSuchKey":
changed_files.append(file)
except ValueError or ResponseError as e:
print(f'ERROR: {file} {e}')
else:
LOG.error(e)
except (ValueError, InvalidResponseError) as e:
LOG.error(f'ERROR: {file} {e}')
print(os.linesep.join(changed_files))
@@ -437,11 +377,7 @@ def update_changed_files_hashes(ctx, context, input_and_identity, profile, files
file, identity = file.split('\t')
file_identity = f'{get_file_identity(ctx.obj, file)}.json'
try:
sha256sum = hashlib.sha256()
with open(file, 'rb') as f:
for byte_block in iter(lambda: f.read(BUF_SIZE), b''):
sha256sum.update(byte_block)
calculated_file_hash = sha256sum.hexdigest()
calculated_file_hash = get_file_sha256sum(file)
object_data = {
"sourcePath": file,
@@ -464,8 +400,8 @@ def update_changed_files_hashes(ctx, context, input_and_identity, profile, files
metadata={}
)
updated_files.append(file)
except ValueError or ResponseError as e:
print(f'ERROR: {file} {e}')
except (ValueError, InvalidResponseError) as e:
LOG.error(f'ERROR: {file} {e}')
print(os.linesep.join(updated_files))
@@ -499,8 +435,8 @@ def store_files(ctx, context, files):
ctx.obj['ADD_PREFIX'], file_identity))
else:
stored_files.append(file)
except ResponseError as e:
print(f'ERROR: {file} {e}', file=sys.stderr)
except InvalidResponseError as e:
LOG.error(f'ERROR: {file} {e}')
print(os.linesep.join(stored_files))
@@ -533,11 +469,13 @@ def retrieve_files(ctx, context, destination, files):
file_destination
)
retrieved_files.append(file_destination)
except NoSuchKey as e:
print(
f'ERROR: {file_identity} {file_destination} {e}', file=sys.stderr)
except ResponseError as e:
print(f'ERROR: {file_destination} {e}', file=sys.stderr)
except S3Error as e:
if e.code == "NoSuchKey":
LOG.error(f'ERROR: {file_identity} {file_destination} {e}')
else:
LOG.error(e)
except InvalidResponseError as e:
LOG.error(f'ERROR: {file_destination} {e}')
print(os.linesep.join(retrieved_files))
@@ -575,10 +513,13 @@ def clean_files(ctx, context, context_data, dry_run, files):
else:
file_object = s3.get_object(s3_bucket, file_identity)
found_files.append(file_identity)
except ResponseError as e:
print(f'ERROR: ResponseError {file_identity} {e}', file=sys.stderr)
except NoSuchKey as e:
print(f'ERROR: NoSuchKey {file_identity}', file=sys.stderr)
except InvalidResponseError as e:
LOG.error(f'ERROR: InvalidResponseError {file_identity} {e}')
except S3Error as e:
if e.code == "NoSuchKey":
LOG.error(f'ERROR: NoSuchKey {file_identity}')
else:
LOG.error(e)
# print(os.linesep.join(found_objects))
# print(os.linesep.join(found_objects))
@@ -614,8 +555,8 @@ def clean_files(ctx, context, context_data, dry_run, files):
try:
s3.remove_object(s3_bucket, file_identity)
removed_files.append(f'{s3_bucket}:{file_identity}')
except ResponseError as e:
print(
except InvalidResponseError as e:
LOG.error(
f'ERROR: {s3_bucket}:{file_identity} {e}')
for file_data_identity in found_data_objects:
@@ -627,8 +568,8 @@ def clean_files(ctx, context, context_data, dry_run, files):
s3.remove_object(s3_data_bucket, file_data_identity)
removed_files.append(
f'{s3_data_bucket}:{file_data_identity}')
except ResponseError as e:
print(
except InvalidResponseError as e:
LOG.error(
f'ERROR: {s3_data_bucket}:{file_data_identity} {e}')
print(os.linesep.join(removed_files))
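The exception changes above track the minio 7.x client API: the old NoSuchKey exception is gone, S3Error now carries the S3 error code, and InvalidResponseError replaces ResponseError. A minimal sketch of the recurring pattern, assuming a reachable MinIO endpoint and placeholder credentials:

    from minio import Minio
    from minio.error import InvalidResponseError, S3Error

    client = Minio("127.0.0.1:9000", access_key="minioadmin",
                   secret_key="minioadmin", secure=False)

    try:
        response = client.get_object("assets", "missing-key.json")
        data = response.read()
    except S3Error as e:
        if e.code == "NoSuchKey":
            # A missing object is an expected case here, not an error
            print("object does not exist")
        else:
            raise
    except InvalidResponseError as e:
        # The server replied with something that is not a valid S3 response
        print(f"invalid response: {e}")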

acm/__init__.py (0 changes)

acm/asyncio.py (81 changes)

@@ -0,0 +1,81 @@
import asyncio
import logging
import platform
import typing
LOG = logging.getLogger("acm.async")
async def run_command_shell(
command,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
on_success: typing.List[typing.Callable] = []
):
"""Run command in subprocess (shell).
Note:
This can be used if you wish to execute e.g. "copy"
on Windows, which can only be executed in the shell.
"""
process = await asyncio.create_subprocess_shell(
command, stdout=stdout, stderr=stderr
)
process_stdout, process_stderr = await process.communicate()
if process.returncode == 0:
for success_callable in on_success:
success_callable()
if stdout != asyncio.subprocess.DEVNULL:
result = process_stdout.decode().strip()
return result
else:
return None
def make_chunks(tasks, chunk_size):
"""Yield successive chunk_size-sized chunks from tasks.
Note:
Taken from https://stackoverflow.com/a/312464
modified for python 3 only
"""
for i in range(0, len(tasks), chunk_size):
yield tasks[i: i + chunk_size]
def run_asyncio_commands(tasks, max_concurrent_tasks=0):
"""Run tasks asynchronously using asyncio and return results.
If max_concurrent_tasks are set to 0, no limit is applied.
Note:
By default, Windows uses SelectorEventLoop, which does not support
subprocesses. Therefore ProactorEventLoop is used on Windows.
https://docs.python.org/3/library/asyncio-eventloops.html#windows
"""
all_results = []
if max_concurrent_tasks == 0:
chunks = [tasks]
num_chunks = len(chunks)
else:
chunks = make_chunks(tasks=tasks, chunk_size=max_concurrent_tasks)
num_chunks = len(
list(make_chunks(tasks=tasks, chunk_size=max_concurrent_tasks)))
if asyncio.get_event_loop().is_closed():
asyncio.set_event_loop(asyncio.new_event_loop())
if platform.system() == "Windows":
asyncio.set_event_loop(asyncio.ProactorEventLoop())
loop = asyncio.get_event_loop()
chunk = 1
for tasks_in_chunk in chunks:
commands = asyncio.gather(*tasks_in_chunk)
results = loop.run_until_complete(commands)
all_results += results
chunk += 1
loop.close()
return all_results
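A usage sketch for these helpers (the echo command is only an illustration):

    from acm.asyncio import run_asyncio_commands, run_command_shell

    tasks = [
        run_command_shell(f"echo processing item {i}", on_success=[])
        for i in range(8)
    ]
    # Run at most 4 subprocesses per chunk; 0 would run everything at once
    results = run_asyncio_commands(tasks, max_concurrent_tasks=4)
    print(results)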

acm/config.py (311 changes)

@@ -0,0 +1,311 @@
import importlib.metadata
import json
import logging
import typing
from pydantic import BaseModel, BaseSettings, validator
from acm.utility import get_string_sha256sum, get_string_xor
from acm.version import VERSION
LOG = logging.getLogger("acm.config")
class ACMProfileProcessorOptions(BaseModel):
force_preserve_smaller_input: bool = False
class ACMProfileProcessor(BaseModel):
name: str
version: typing.Optional[str]
processors: typing.List[str]
extensions: typing.List[str]
output_extension: str
options: ACMProfileProcessorOptions
command: str
signature: typing.Optional[str]
@validator('version', always=True)
def version_validator(cls, v, values) -> str:
# Default to the application version when one is not provided
if v is None:
return VERSION
return v
@validator('signature', always=True)
def signature_validator(cls, v, values) -> str:
signature_keys = ["name", "version", "processors", "extensions", "output_extension", "command"]
signature_values = [value for key, value in values.items() if key in signature_keys]
return get_string_sha256sum(json.dumps(signature_values))
class ACMProfile(BaseModel):
name: str
version: typing.Optional[str]
processors: typing.List[ACMProfileProcessor]
signature: typing.Optional[str]
@validator('version', always=True)
def version_validator(cls, v, values) -> str:
if v is None:
return VERSION
return v
@validator('signature', always=True)
def hash_signature_validator(cls, v, values) -> str:
signature_keys = ["name", "version"]
signature_values = [value for key, value in values.items() if key in signature_keys]
signature = get_string_sha256sum(json.dumps(signature_values))
processor_signatures = [processor.signature for processor in values["processors"]]
if len(processor_signatures) > 1:
combined_processor_signature = get_string_xor(*processor_signatures)
else:
combined_processor_signature = processor_signatures[0]
return get_string_sha256sum(signature + combined_processor_signature)
def get_processor_names(self) -> typing.List[str]:
return [processor.name for processor in self.processors]
def get_processor(self, name: str) -> typing.Optional[ACMProfileProcessor]:
for processor in self.processors:
if name == processor.name:
return processor
return None
class ACMS3(BaseModel):
secure: bool = False
host: str = "127.0.0.1:9000"
access_key: typing.Optional[str]
secret_key: typing.Optional[str]
class ACMConfig(BaseSettings):
concurrency: int = 0
debug: bool = False
s3: typing.Optional[ACMS3]
version: typing.Optional[str]
profiles: typing.List[ACMProfile]
signature: typing.Optional[str]
@validator('version', always=True)
def version_validator(cls, v, values) -> str:
if v is None:
return VERSION
return v
@validator('signature', always=True)
def signature_validator(cls, v, values) -> str:
signature_keys = ["version"]
signature_values = [value for key, value in values.items() if key in signature_keys]
signature = get_string_sha256sum(json.dumps(signature_values))
profiles_signatures = [profiles.signature for profiles in values["profiles"]]
if len(profiles_signatures) > 1:
combined_profiles_signature = get_string_xor(*profiles_signatures)
else:
combined_profiles_signature = profiles_signatures[0]
return get_string_sha256sum(signature + combined_profiles_signature)
class Config:
env_prefix = 'ACM_'
env_nested_delimiter = '__'
def get_profile_names(self) -> typing.List[str]:
return [profile.name for profile in self.profiles]
def get_profile(self, name: str) -> typing.Optional[ACMProfile]:
for profile in self.profiles:
if name == profile.name:
return profile
return None
def get_default_config():
"""
Returns the default ACM config
"""
acm_profiles = []
# default #
acm_default_processors = []
acm_default_processors.append(ACMProfileProcessor(
name = "jpeg",
processors = ["cjpeg"],
extensions = ["jpg", "jpeg"],
output_extension = "jpg",
options = ACMProfileProcessorOptions(force_preserve_smaller_input=True),
command = "cjpeg -optimize -quality 90 -progressive -outfile {output_file} {input_file}"
))
acm_default_processors.append(ACMProfileProcessor(
name = "png",
processors = ["optipng"],
extensions = ["png"],
output_extension = "png",
options = ACMProfileProcessorOptions(force_preserve_smaller_input=True),
command = "optipng -o2 -strip all -out {output_file} {input_file}"
))
acm_default_processors.append(ACMProfileProcessor(
name = "video",
processors = ["ffmpeg"],
extensions = ["mp4","webm"],
output_extension = "webm",
options = ACMProfileProcessorOptions(),
command = "optipng -o2 -strip all -out {output_file} {input_file}"
))
acm_default_processors.append(ACMProfileProcessor(
name = "audio",
processors = ["ffmpeg","opusenc"],
extensions = ["wav","mp3"],
output_extension = "ogg",
options = ACMProfileProcessorOptions(),
command = "optipng -o2 -strip all -out {output_file} {input_file}"
))
acm_profiles.append(ACMProfile(
name = "default",
processors = acm_default_processors
))
# placebo #
acm_placebo_processors = []
acm_placebo_processors.append(ACMProfileProcessor(
name = "jpeg",
processors = ["cjpeg"],
extensions = ["jpg", "jpeg"],
output_extension = "jpg",
options = ACMProfileProcessorOptions(),
command = "cp {input_file} {output_file}"
))
acm_placebo_processors.append(ACMProfileProcessor(
name = "png",
processors = ["optipng"],
extensions = ["png"],
output_extension = "png",
options = ACMProfileProcessorOptions(),
command = "cp {input_file} {output_file}"
))
acm_placebo_processors.append(ACMProfileProcessor(
name = "video",
processors = ["ffmpeg"],
extensions = ["mp4","webm"],
output_extension = "webm",
options = ACMProfileProcessorOptions(),
command = "cp {input_file} {output_file}"
))
acm_placebo_processors.append(ACMProfileProcessor(
name = "audio",
processors = ["ffmpeg","opusenc"],
extensions = ["wav","mp3"],
output_extension = "ogg",
options = ACMProfileProcessorOptions(),
command = "cp {input_file} {output_file}"
))
acm_profiles.append(ACMProfile(
name = "placebo",
processors = acm_placebo_processors
))
# webp #
acm_webp_processors = []
acm_webp_processors.append(ACMProfileProcessor(
name = "jpeg",
processors = ["cwebp"],
extensions = ["jpg", "jpeg"],
output_extension = "jpg",
options = ACMProfileProcessorOptions(),
command = "cwebp -jpeg_like -q 90 -o {output_file} {input_file}"
))
acm_webp_processors.append(ACMProfileProcessor(
name = "png",
processors = ["cwebp"],
extensions = ["png"],
output_extension = "png",
options = ACMProfileProcessorOptions(),
command = "cwebp -lossless -o {output_file} {input_file}"
))
acm_profiles.append(ACMProfile(
name = "webp",
processors = acm_webp_processors
))
# aggressive #
acm_aggressive_processors = []
acm_aggressive_processors.append(ACMProfileProcessor(
name = "jpeg",
processors = ["cjpeg"],
extensions = ["jpg", "jpeg"],
output_extension = "jpg",
options = ACMProfileProcessorOptions(force_preserve_smaller_input=True),
command = "export FILE={output_file} && export TEMP_FILE=${FILE}_tmp.jpg && ffmpeg -i {input_file} -vf scale=-1:720 ${TEMP_FILE} && cjpeg -optimize -quality 75 -progressive -outfile {output_file} ${TEMP_FILE} && rm ${TEMP_FILE}"
))
acm_aggressive_processors.append(ACMProfileProcessor(
name = "png",
processors = ["optipng"],
extensions = ["png"],
output_extension = "png",
options = ACMProfileProcessorOptions(force_preserve_smaller_input=True),
command = "optipng -o2 -strip all -out {output_file} {input_file}"
))
acm_aggressive_processors.append(ACMProfileProcessor(
name = "video",
processors = ["ffmpeg"],
extensions = ["mp4","webm"],
output_extension = "webm",
options = ACMProfileProcessorOptions(),
command = "ffmpeg -hide_banner -loglevel panic -i {input_file} -vf scale=-1:720 -c:v libvpx-vp9 -b:v 0 -crf 38 -c:a libopus {output_file}"
))
acm_aggressive_processors.append(ACMProfileProcessor(
name = "audio",
processors = ["ffmpeg","opusenc"],
extensions = ["wav","mp3"],
output_extension = "ogg",
options = ACMProfileProcessorOptions(),
command = "ffmpeg -hide_banner -loglevel panic -i {input_file} -f wav -| opusenc --bitrate 64 --vbr --downmix-stereo --discard-comments --discard-pictures - {output_file}"
))
acm_profiles.append(ACMProfile(
name = "aggressive",
processors = acm_aggressive_processors
))
# aggressive-webp #
acm_aggressive_webp_processors = []
acm_aggressive_webp_processors.append(ACMProfileProcessor(
name = "jpeg",
processors = ["cwebp"],
extensions = ["jpg", "jpeg"],
output_extension = "jpg",
options = ACMProfileProcessorOptions(),
command = "export FILE={output_file} && export TEMP_FILE=${FILE}_tmp.jpg && ffmpeg -i {input_file} -vf scale=-1:720 ${TEMP_FILE} && cwebp -jpeg_like -q 75 -o {output_file} ${TEMP_FILE} && rm ${TEMP_FILE}"
))
acm_aggressive_webp_processors.append(ACMProfileProcessor(
name = "png",
processors = ["optipng"],
extensions = ["png"],
output_extension = "png",
options = ACMProfileProcessorOptions(),
command = "cwebp -o {output_file} ${input_file}"
))
acm_aggressive_webp_processors.append(ACMProfileProcessor(
name = "video",
processors = ["ffmpeg"],
extensions = ["mp4","webm"],
output_extension = "webm",
options = ACMProfileProcessorOptions(),
command = "ffmpeg -hide_banner -loglevel panic -i {input_file} -vf scale=-1:720 -c:v libvpx-vp9 -b:v 0 -crf 38 -c:a libopus {output_file}"
))
acm_aggressive_webp_processors.append(ACMProfileProcessor(
name = "audio",
processors = ["ffmpeg","opusenc"],
extensions = ["wav","mp3"],
output_extension = "ogg",
options = ACMProfileProcessorOptions(),
command = "ffmpeg -hide_banner -loglevel panic -i {input_file} -f wav -| opusenc --bitrate 64 --vbr --downmix-stereo --discard-comments --discard-pictures - {output_file}"
))
acm_profiles.append(ACMProfile(
name = "aggressive-webp",
processors = acm_aggressive_webp_processors
))
return ACMConfig(
profiles=acm_profiles
)
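A short sketch of how this model is consumed (all names are defined in this file):

    from acm.config import get_default_config

    config = get_default_config()
    print(config.get_profile_names())
    # ['default', 'placebo', 'webp', 'aggressive', 'aggressive-webp']

    jpeg = config.get_profile("default").get_processor("jpeg")
    print(jpeg.command)
    # Signatures are filled in by the validators, so configs can be
    # compared for drift without re-serializing everything
    print(config.signature)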

acm/logging.py (46 changes)

@@ -0,0 +1,46 @@
import logging
LOG = logging.getLogger("acm.logging")
def setup_basic_logging(
logger_name,
logger_level = logging.ERROR,
log_format = "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
date_format="%Y-%m-%dT%H:%M:%S%Z",
default_level = logging.INFO,
):
"""
Initialize logging with sane defaults
"""
logging.basicConfig(
format=log_format,
datefmt=date_format,
level=default_level
)
configured_logger = logging.getLogger(logger_name)
configured_logger.setLevel(logger_level)
return configured_logger
def update_logging_level(verbosity: int = 0, *loggers):
"""
Configure logging based on the requested verbosity
"""
if verbosity > 2:
logging_level = logging.DEBUG
elif verbosity > 1:
logging_level = logging.INFO
elif verbosity > 0:
logging_level = logging.WARN
elif verbosity == 0:
logging_level = logging.ERROR
elif verbosity < 0:
logging_level = logging.CRITICAL
for logger in loggers:
if isinstance(logger, logging.Logger) or isinstance(logger, logging.Handler):
logger.setLevel(logging_level)
else:
logger_instance = logging.getLogger(logger)
logger_instance.setLevel(logging_level)
LOG.debug("Set logging level for to %s", logging_level)

acm/s3.py (3 changes)

@@ -0,0 +1,3 @@
import logging
LOG = logging.getLogger("acm.s3")

acm/utility.py (39 changes)

@@ -0,0 +1,39 @@
import hashlib
import io
import logging
# Size of the buffer to read files with
BUF_SIZE = 4096
LOG = logging.getLogger("acm.utility")
def get_file_sha256sum(input_file):
sha256sum = hashlib.sha256()
with open(input_file, 'rb') as f:
for byte_block in iter(lambda: f.read(BUF_SIZE), b""):
sha256sum.update(byte_block)
return sha256sum.hexdigest()
def get_string_sha256sum(content: str, encoding='utf-8') -> str:
sha256sum = hashlib.sha256()
with io.BytesIO(content.encode(encoding)) as c:
for byte_block in iter(lambda: c.read(BUF_SIZE), b''):
sha256sum.update(byte_block)
return sha256sum.hexdigest()
def get_string_hex(content: str) -> str:
return hex(int(content, base=16))
def get_hex_xor(first: str, second: str) -> str:
return hex(int(first, base=16) ^ int(second, base=16))
def get_string_xor(first: str, second: str, *extra: str) -> str:
result = get_hex_xor(get_string_hex(first), get_string_hex(second))
for next_hex in extra:
result = get_hex_xor(result, get_string_hex(next_hex))
return str(result)
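The XOR helpers exist so that several sha256 hex digests can be folded into one order-independent value, which is how the config signatures above combine per-processor signatures. A worked sketch:

    from acm.utility import get_string_sha256sum, get_string_xor

    a = get_string_sha256sum("processor-a")
    b = get_string_sha256sum("processor-b")
    combined = get_string_xor(a, b)
    # XOR is commutative, so input order does not change the result
    assert combined == get_string_xor(b, a)
    print(combined)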

acm/version.py (31 changes)

@@ -0,0 +1,31 @@
import importlib.metadata
import logging
import os
import pathlib
import toml
LOG = logging.getLogger("acm.version")
def __get_version():
"""
Automatically determine the version of the application being run
"""
# Attempt to read the installed package information
try:
return importlib.metadata.version('asset-compression-manager')
except importlib.metadata.PackageNotFoundError:
LOG.debug("The package is not installed, reading the version from another source")
# Fallback on parsing the pyproject.toml file
root_dir = pathlib.Path(__file__).parent.parent.resolve()
with open(os.path.join(root_dir, "pyproject.toml"), "r") as project_file:
project = toml.load(project_file)
return project["tool"]["poetry"]["version"]
LOG.debug("Falling back on UNKNOWN version identifier")
return "UNKNOWN"
# Application Version
VERSION = __get_version()
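The resolution order here is: installed package metadata first, then the local pyproject.toml. A stdlib-only sketch of checking which branch applies:

    import importlib.metadata

    try:
        print("installed:", importlib.metadata.version("asset-compression-manager"))
    except importlib.metadata.PackageNotFoundError:
        print("not installed; acm.version falls back to pyproject.toml")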

docker/entrypoint.sh (10 changes)

@@ -1,3 +1,9 @@
#! /bin/sh
#!/usr/bin/env bash
acm $@
options=""
if [[ "${ACM_DEBUG}" == "true" ]]; then
options="--debug $options"
fi
acm ${options} $@

poetry.lock (141 changes)

@@ -0,0 +1,141 @@
[[package]]
name = "certifi"
version = "2021.10.8"
description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false
python-versions = "*"
[[package]]
name = "click"
version = "8.0.3"
description = "Composable command line interface toolkit"
category = "main"
optional = false
python-versions = ">=3.6"
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.4"
description = "Cross-platform colored terminal text."
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"
[[package]]
name = "minio"
version = "7.1.2"
description = "MinIO Python SDK for Amazon S3 Compatible Cloud Storage"
category = "main"
optional = false
python-versions = "*"
[package.dependencies]
certifi = "*"
urllib3 = "*"
[[package]]
name = "pydantic"
version = "1.8.2"
description = "Data validation and settings management using python 3.6 type hinting"
category = "main"
optional = false
python-versions = ">=3.6.1"
[package.dependencies]
typing-extensions = ">=3.7.4.3"
[package.extras]
dotenv = ["python-dotenv (>=0.10.4)"]
email = ["email-validator (>=1.0.3)"]
[[package]]
name = "toml"
version = "0.10.2"
description = "Python Library for Tom's Obvious, Minimal Language"
category = "main"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
[[package]]
name = "typing-extensions"
version = "4.0.1"
description = "Backported and Experimental Type Hints for Python 3.6+"
category = "main"
optional = false
python-versions = ">=3.6"
[[package]]
name = "urllib3"
version = "1.26.7"
description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4"
[package.extras]
brotli = ["brotlipy (>=0.6.0)"]
secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata]
lock-version = "1.1"
python-versions = "^3.8"
content-hash = "751276ba1ea83218a27169c8d996edf4ae2f3c7a648d012674dd8ac2431508e4"
[metadata.files]
certifi = [
{file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"},
{file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"},
]
click = [
{file = "click-8.0.3-py3-none-any.whl", hash = "sha256:353f466495adaeb40b6b5f592f9f91cb22372351c84caeb068132442a4518ef3"},
{file = "click-8.0.3.tar.gz", hash = "sha256:410e932b050f5eed773c4cda94de75971c89cdb3155a72a0831139a79e5ecb5b"},
]
colorama = [
{file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"},
{file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"},
]
minio = [
{file = "minio-7.1.2-py3-none-any.whl", hash = "sha256:51318733496f37617bebfefe116453406a0d5afc6add8c421df07f32e0843c2b"},
{file = "minio-7.1.2.tar.gz", hash = "sha256:40d0cdb4dba5d5610d6599ea740cf827102db5bfa71279fc220c3cf7305bedc1"},
]
pydantic = [
{file = "pydantic-1.8.2-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:05ddfd37c1720c392f4e0d43c484217b7521558302e7069ce8d318438d297739"},
{file = "pydantic-1.8.2-cp36-cp36m-manylinux1_i686.whl", hash = "sha256:a7c6002203fe2c5a1b5cbb141bb85060cbff88c2d78eccbc72d97eb7022c43e4"},
{file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_i686.whl", hash = "sha256:589eb6cd6361e8ac341db97602eb7f354551482368a37f4fd086c0733548308e"},
{file = "pydantic-1.8.2-cp36-cp36m-manylinux2014_x86_64.whl", hash = "sha256:10e5622224245941efc193ad1d159887872776df7a8fd592ed746aa25d071840"},
{file = "pydantic-1.8.2-cp36-cp36m-win_amd64.whl", hash = "sha256:99a9fc39470010c45c161a1dc584997f1feb13f689ecf645f59bb4ba623e586b"},
{file = "pydantic-1.8.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:a83db7205f60c6a86f2c44a61791d993dff4b73135df1973ecd9eed5ea0bda20"},
{file = "pydantic-1.8.2-cp37-cp37m-manylinux1_i686.whl", hash = "sha256:41b542c0b3c42dc17da70554bc6f38cbc30d7066d2c2815a94499b5684582ecb"},
{file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_i686.whl", hash = "sha256:ea5cb40a3b23b3265f6325727ddfc45141b08ed665458be8c6285e7b85bd73a1"},
{file = "pydantic-1.8.2-cp37-cp37m-manylinux2014_x86_64.whl", hash = "sha256:18b5ea242dd3e62dbf89b2b0ec9ba6c7b5abaf6af85b95a97b00279f65845a23"},
{file = "pydantic-1.8.2-cp37-cp37m-win_amd64.whl", hash = "sha256:234a6c19f1c14e25e362cb05c68afb7f183eb931dd3cd4605eafff055ebbf287"},
{file = "pydantic-1.8.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:021ea0e4133e8c824775a0cfe098677acf6fa5a3cbf9206a376eed3fc09302cd"},
{file = "pydantic-1.8.2-cp38-cp38-manylinux1_i686.whl", hash = "sha256:e710876437bc07bd414ff453ac8ec63d219e7690128d925c6e82889d674bb505"},
{file = "pydantic-1.8.2-cp38-cp38-manylinux2014_i686.whl", hash = "sha256:ac8eed4ca3bd3aadc58a13c2aa93cd8a884bcf21cb019f8cfecaae3b6ce3746e"},
{file = "pydantic-1.8.2-cp38-cp38-manylinux2014_x86_64.whl", hash = "sha256:4a03cbbe743e9c7247ceae6f0d8898f7a64bb65800a45cbdc52d65e370570820"},
{file = "pydantic-1.8.2-cp38-cp38-win_amd64.whl", hash = "sha256:8621559dcf5afacf0069ed194278f35c255dc1a1385c28b32dd6c110fd6531b3"},
{file = "pydantic-1.8.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8b223557f9510cf0bfd8b01316bf6dd281cf41826607eada99662f5e4963f316"},
{file = "pydantic-1.8.2-cp39-cp39-manylinux1_i686.whl", hash = "sha256:244ad78eeb388a43b0c927e74d3af78008e944074b7d0f4f696ddd5b2af43c62"},
{file = "pydantic-1.8.2-cp39-cp39-manylinux2014_i686.whl", hash = "sha256:05ef5246a7ffd2ce12a619cbb29f3307b7c4509307b1b49f456657b43529dc6f"},
{file = "pydantic-1.8.2-cp39-cp39-manylinux2014_x86_64.whl", hash = "sha256:54cd5121383f4a461ff7644c7ca20c0419d58052db70d8791eacbbe31528916b"},
{file = "pydantic-1.8.2-cp39-cp39-win_amd64.whl", hash = "sha256:4be75bebf676a5f0f87937c6ddb061fa39cbea067240d98e298508c1bda6f3f3"},
{file = "pydantic-1.8.2-py3-none-any.whl", hash = "sha256:fec866a0b59f372b7e776f2d7308511784dace622e0992a0b59ea3ccee0ae833"},
{file = "pydantic-1.8.2.tar.gz", hash = "sha256:26464e57ccaafe72b7ad156fdaa4e9b9ef051f69e175dbbb463283000c05ab7b"},
]
toml = [
{file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"},
{file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"},
]
typing-extensions = [
{file = "typing_extensions-4.0.1-py3-none-any.whl", hash = "sha256:7f001e5ac290a0c0401508864c7ec868be4e701886d5b573a9528ed3973d9d3b"},
{file = "typing_extensions-4.0.1.tar.gz", hash = "sha256:4ca091dea149f945ec56afb48dae714f21e8692ef22a395223bcd328961b6a0e"},
]
urllib3 = [
{file = "urllib3-1.26.7-py2.py3-none-any.whl", hash = "sha256:c4fdf4019605b6e5423637e01bc9fe4daef873709a7973e195ceba0a62bbc844"},
{file = "urllib3-1.26.7.tar.gz", hash = "sha256:4987c65554f7a2dbf30c18fd48778ef124af6fab771a377103da0585e2336ece"},
]

pyproject.toml (19 changes)

@@ -0,0 +1,19 @@
[tool.poetry]
name = "asset-compression-manager"
version = "2.0.0"
description = "Helper Utility For Managing Compressed Assets"
authors = ["Drew Short <warrick@sothr.com>"]
license = "Apache2"
[tool.poetry.dependencies]
python = "^3.8"
click = "8.0.3"
minio = "7.1.2"
pydantic = "1.8.2"
toml = "0.10.2"
[tool.poetry.dev-dependencies]
[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"

requirements.txt (2 changes)

@@ -1,2 +0,0 @@
click == 7.1.1
minio == 5.0.8

scripts/build_container.sh (9 changes)

@@ -0,0 +1,9 @@
#!/usr/bin/env bash
TAG=${TAG:-dev}
if [[ -n "$1" ]]; then
TAG="$1"
fi
export DOCKER_BUILDKIT=1
docker build --platform=amd64 -t sothr/acm:${TAG} .

upload_pipeline.sh → scripts/upload_pipeline.sh (renamed, 0 changes)

setup.py (2 changes)

@@ -4,7 +4,7 @@ from distutils.core import setup
setup(
name='Asset-Compression-Manager',
version='1.4.0',
version='2.0.0',
description='Helper Utility For Managing Compressed Assets',
author='Drew Short',
author_email='warrick@sothr.com'