Compare commits
No commits in common. 'main' and 'fix-disco-xform-utils-import-error' have entirely different histories.
main
...
fix-disco-
10 changed files with 951 additions and 2725 deletions
@ -1,146 +0,0 @@ |
||||
# Disco-specific ignores
||||
init_images/* |
||||
images_out/* |
||||
MiDaS/ |
||||
models/ |
||||
pretrained/* |
||||
settings.json |
||||
|
||||
# Byte-compiled / optimized / DLL files |
||||
__pycache__/ |
||||
*.py[cod] |
||||
*$py.class |
||||
|
||||
# C extensions |
||||
*.so |
||||
|
||||
# Distribution / packaging |
||||
.Python |
||||
build/ |
||||
develop-eggs/ |
||||
dist/ |
||||
downloads/ |
||||
eggs/ |
||||
.eggs/ |
||||
lib/ |
||||
lib64/ |
||||
parts/ |
||||
sdist/ |
||||
var/ |
||||
wheels/ |
||||
share/python-wheels/ |
||||
*.egg-info/ |
||||
.installed.cfg |
||||
*.egg |
||||
MANIFEST |
||||
|
||||
# PyInstaller |
||||
# Usually these files are written by a python script from a template |
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it. |
||||
*.manifest |
||||
*.spec |
||||
|
||||
# Installer logs |
||||
pip-log.txt |
||||
pip-delete-this-directory.txt |
||||
|
||||
# Unit test / coverage reports |
||||
htmlcov/ |
||||
.tox/ |
||||
.nox/ |
||||
.coverage |
||||
.coverage.* |
||||
.cache |
||||
nosetests.xml |
||||
coverage.xml |
||||
*.cover |
||||
*.py,cover |
||||
.hypothesis/ |
||||
.pytest_cache/ |
||||
cover/ |
||||
|
||||
# Translations |
||||
*.mo |
||||
*.pot |
||||
|
||||
# Django stuff: |
||||
*.log |
||||
local_settings.py |
||||
db.sqlite3 |
||||
db.sqlite3-journal |
||||
|
||||
# Flask stuff: |
||||
instance/ |
||||
.webassets-cache |
||||
|
||||
# Scrapy stuff: |
||||
.scrapy |
||||
|
||||
# Sphinx documentation |
||||
docs/_build/ |
||||
|
||||
# PyBuilder |
||||
.pybuilder/ |
||||
target/ |
||||
|
||||
# Jupyter Notebook |
||||
.ipynb_checkpoints |
||||
|
||||
# IPython |
||||
profile_default/ |
||||
ipython_config.py |
||||
|
||||
# pyenv |
||||
# For a library or package, you might want to ignore these files since the code is |
||||
# intended to run in multiple environments; otherwise, check them in: |
||||
# .python-version |
||||
|
||||
# pipenv |
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. |
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies |
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not |
||||
# install all needed dependencies. |
||||
#Pipfile.lock |
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow |
||||
__pypackages__/ |
||||
|
||||
# Celery stuff |
||||
celerybeat-schedule |
||||
celerybeat.pid |
||||
|
||||
# SageMath parsed files |
||||
*.sage.py |
||||
|
||||
# Environments |
||||
.env |
||||
.venv |
||||
env/ |
||||
venv/ |
||||
ENV/ |
||||
env.bak/ |
||||
venv.bak/ |
||||
|
||||
# Spyder project settings |
||||
.spyderproject |
||||
.spyproject |
||||
|
||||
# Rope project settings |
||||
.ropeproject |
||||
|
||||
# mkdocs documentation |
||||
/site |
||||
|
||||
# mypy |
||||
.mypy_cache/ |
||||
.dmypy.json |
||||
dmypy.json |
||||
|
||||
# Pyre type checker |
||||
.pyre/ |
||||
|
||||
# pytype static type analyzer |
||||
.pytype/ |
||||
|
||||
# Cython debug symbols |
||||
cython_debug/ |
File diff suppressed because it is too large
Load Diff
@ -1,24 +0,0 @@ |
||||
import subprocess
import sys
from importlib import util as importlibutil
||||
|
||||
def module_exists(module_name):
    """Return the import spec for *module_name* if it is importable.

    The result is a ``ModuleSpec`` (truthy) when the module can be found,
    or ``None`` (falsy) when it cannot — callers use this as a boolean.
    """
    spec = importlibutil.find_spec(module_name)
    return spec
||||
|
||||
def gitclone(url, targetdir=None):
    """Clone the git repository at *url* and print git's captured stdout.

    When *targetdir* is given it is passed to ``git clone`` as the
    destination directory; otherwise git picks its default directory name.
    """
    cmd = ['git', 'clone', url]
    if targetdir:
        cmd.append(targetdir)
    res = subprocess.run(cmd, stdout=subprocess.PIPE).stdout.decode('utf-8')
    print(res)
||||
|
||||
def pipi(modulestr):
    """Install *modulestr* with pip and print pip's captured stdout.

    Invokes pip as ``sys.executable -m pip`` so the package is installed
    into the interpreter actually running this script — a bare ``pip`` on
    PATH may belong to a different Python installation.
    """
    res = subprocess.run(
        [sys.executable, '-m', 'pip', 'install', modulestr],
        stdout=subprocess.PIPE,
    ).stdout.decode('utf-8')
    print(res)
||||
|
||||
def pipie(modulestr):
    """Editable-install *modulestr* with pip and print pip's captured stdout.

    Bug fix: the original ran ``git install -e`` — ``install`` is not a
    git subcommand, so the call could never succeed. An editable install
    ("pip install -e") is a pip operation; invoke it via the running
    interpreter for the same reason as in ``pipi``.
    """
    res = subprocess.run(
        [sys.executable, '-m', 'pip', 'install', '-e', modulestr],
        stdout=subprocess.PIPE,
    ).stdout.decode('utf-8')
    print(res)
||||
|
||||
def wget(url, outputdir):
    """Download *url* into *outputdir* via the ``wget`` CLI and print its stdout."""
    proc = subprocess.run(
        ['wget', url, '-P', f'{outputdir}'],
        stdout=subprocess.PIPE,
    )
    print(proc.stdout.decode('utf-8'))
@ -1,47 +0,0 @@ |
||||
# Docker |
||||
|
||||
## Introduction |
||||
|
||||
This is a Docker build file that will preinstall dependencies, packages, Git repos, and pre-cache the large model files needed by Disco Diffusion. |
||||
|
||||
## TO-DO: |
||||
|
||||
- Make container actually accept parameters on run. Right now you'll just be seeing lighthouses. |
||||
|
||||
## Change Log |
||||
|
||||
- `1.0` |
||||
|
||||
Initial build file created based on the DD 5.1 Git repo. This initial build is deliberately meant to work touch-free of any of the existing Python code written. It does handle some of the pre-setup tasks already done in the Python code such as pip packages, Git clones, and even pre-caching the model files for faster launch speed. |
||||
|
||||
## Build the Prep Image |
||||
The prep image is broken out from the `main` folder's `Dockerfile` to help with long build context times (or wget download times after initial build.) This prep image build contains all the large model files required by Disco Diffusion.
||||
|
||||
From a terminal in the `docker/prep` directory, run: |
||||
```sh |
||||
docker build -t disco-diffusion-prep:5.1 . |
||||
``` |
||||
## Build the Image

From a terminal in the `docker/main` directory, run:
||||
|
||||
```sh |
||||
docker build -t disco-diffusion:5.1 . |
||||
``` |
||||
|
||||
## Run as a Container |
||||
|
||||
This example runs Disco Diffusion in a Docker container. It maps `images_out` and `init_images` to the container's working directory to access by the host OS. |
||||
```sh |
||||
docker run --rm -it \ |
||||
-v $(echo ~)/disco-diffusion/images_out:/workspace/code/images_out \ |
||||
-v $(echo ~)/disco-diffusion/init_images:/workspace/code/init_images \ |
||||
--gpus=all \ |
||||
--name="disco-diffusion" --ipc=host \ |
||||
--user $(id -u):$(id -g) \ |
||||
disco-diffusion:5.1 python disco-diffusion/disco.py |
||||
``` |
||||
|
||||
## Passing Parameters |
||||
|
||||
This will be added after conferring with repo authors. |
@ -1,40 +0,0 @@ |
||||
# Stage 1: reference the prep image so its large model files can be copied
# in without bloating this build's context (models are slow to re-copy).
FROM disco-diffusion-prep:5.1 AS modelprep

FROM nvcr.io/nvidia/pytorch:21.08-py3

# Keep the image lean (no .pyc files) and logs unbuffered.
ENV PYTHONDONTWRITEBYTECODE 1
ENV PYTHONUNBUFFERED 1

# OS-level dependencies; TZ preset so tzdata installs non-interactively.
RUN apt update
RUN DEBIAN_FRONTEND=noninteractive TZ=Etc/UTC apt-get -y install -y tzdata imagemagick

# Run as a non-root "disco" user from here on.
RUN useradd -ms /bin/bash disco
USER disco

# Working directory for all code and outputs.
RUN mkdir code
WORKDIR /workspace/code

# Pull the pre-downloaded model weights from the prep stage.
COPY --from=modelprep /scratch/models /workspace/code/models
COPY --from=modelprep /scratch/pretrained /workspace/code/pretrained

# Clone the Git repositories Disco Diffusion depends on at runtime.
RUN git clone https://github.com/alembics/disco-diffusion.git && \
    git clone https://github.com/openai/CLIP && \
    git clone https://github.com/assafshocher/ResizeRight.git && \
    git clone https://github.com/MSFTserver/pytorch3d-lite.git && \
    git clone https://github.com/isl-org/MiDaS.git && \
    git clone https://github.com/crowsonkb/guided-diffusion.git && \
    git clone https://github.com/shariqfarooq123/AdaBins.git

# Python package dependencies.
RUN pip install imageio imageio-ffmpeg==0.4.4 pyspng==0.1.0 lpips datetime timm ipywidgets omegaconf>=2.0.0 pytorch-lightning>=1.0.8 torch-fidelity einops wandb pandas ftfy

# Pre-seed the caches CLIP and torchvision would otherwise download at runtime.
COPY --chown=disco --from=modelprep /scratch/clip /home/disco/.cache/clip
COPY --chown=disco --from=modelprep /scratch/model-lpips/vgg16-397923af.pth /home/disco/.cache/torch/hub/checkpoints/vgg16-397923af.pth
@ -1,25 +0,0 @@ |
||||
# Prep image: pre-downloads every large model file Disco Diffusion needs,
# so the main image build can COPY them instead of fetching at run time.
FROM nvcr.io/nvidia/pytorch:21.08-py3 AS prep

# Directory layout mirroring where the main image expects each artifact.
RUN mkdir -p /scratch/models && \
    mkdir -p /scratch/models/superres && \
    mkdir -p /scratch/models/slip && \
    mkdir -p /scratch/model-lpips && \
    mkdir -p /scratch/clip && \
    mkdir -p /scratch/pretrained

# LPIPS perceptual-loss backbone (torchvision VGG16 checkpoint).
RUN wget --progress=bar:force:noscroll -P /scratch/model-lpips https://download.pytorch.org/models/vgg16-397923af.pth

# Diffusion and depth-estimation model weights.
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/models https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/models https://v-diffusion.s3.us-west-2.amazonaws.com/512x512_diffusion_uncond_finetune_008100.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/models https://openaipublic.blob.core.windows.net/diffusion/jul-2021/256x256_diffusion_uncond.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/models https://v-diffusion.s3.us-west-2.amazonaws.com/secondary_model_imagenet_2.pth

# AdaBins depth model used for 3D animation mode.
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/pretrained https://cloudflare-ipfs.com/ipfs/Qmd2mMnDLWePKmgfS8m6ntAg4nhV5VkUyAydYBp8cWWeB7/AdaBins_nyu.pt

# Full set of OpenAI CLIP checkpoints.
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip/ https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt
RUN wget --no-directories --progress=bar:force:noscroll -P /scratch/clip https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt
Loading…
Reference in new issue