wglint / 2_sdv2-1

Stable Diffusion 2.1 - NSFW - Supabase

  • Public
  • 357 runs
  • GitHub

What do and how work this model

What do this model

This model name 2_sdv2-1 can generate picture with Stable Diffusion 2.1 model you can find in huggingface her

You can generate picture and choice if :

  • NSFW : Choice to use a NSFW filter or not.

  • Supabase : Send your picture to supabase with output-{i}-{uuid}.png name to bucket headshot.

How this model work

Before start, we need to have Cog and Docker. For learn Cog, click her for Github Doc. But for start, use brew for install Cog :

brew install cog

After for this model, i use only 3 files :

All the code is in this repo Github.

Or, let check all code her :

cog.yaml

# Configuration for Cog ⚙️
# Reference: https://github.com/replicate/cog/blob/main/docs/yaml.md

build:
  # Need to see more about GPU parameters. I set false cause gpu true make supabase modulo don't work
  gpu: false
  cuda: "11.8"

  python_version: "3.11.1"
  python_packages:
    - "supabase"
    - "diffusers==0.11.1"
    - "torch==1.13.0"
    - "ftfy==6.1.1"
    - "scipy==1.9.3"
    - "transformers==4.25.1"
    - "accelerate==0.15.0"
    - "huggingface-hub==0.13.2"


  run:
    - "echo env is ready!"

predict: "predict.py:Predictor"
image: "r8.im/wglint/2_sdv2-1"

predict.py

# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md

from cog import BasePredictor, Input, Path
from supabase import create_client, Client
import uuid

from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler, AutoencoderKL
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)

import torch
from typing import List


MODEL_ID = "stabilityai/stable-diffusion-2-1"
MODEL_CACHE = "diffusers-cache"
MODEL_VAE_CACHE = "vae-cache"
SAFETY_MODEL_ID = "CompVis/stable-diffusion-safety-checker"

class Predictor(BasePredictor):
    def setup(self):
        print("Loading pipeline...")
        safety_checker = StableDiffusionSafetyChecker.from_pretrained(
            SAFETY_MODEL_ID,
            cache_dir=MODEL_CACHE
        )

        self.pipe_nfsw_check = StableDiffusionPipeline.from_pretrained(
            MODEL_ID,
            safety_checker=safety_checker,
            cache_dir=MODEL_CACHE
        ).to("cuda")
        self.pipe_no_nsfw_check = StableDiffusionPipeline.from_pretrained(
            MODEL_ID,
            cache_dir=MODEL_CACHE
        ).to("cuda")


    @torch.inference_mode()
    def predict(
        self,
        NSFW_Detector : bool = Input(description="Make diffusers model use NSFW Detector", default=True),
        Prompt : str = Input(description="Prompt of the picture", default="black cat"),
        num_inteference_steps : int = Input(description="Number of steps", le=50, default=20),
        width : int = Input(description="Width of the picture", le=1024, default=512),
        height : int = Input(description="Height of the picture", le=1024, default=512),
        number_picture : int = Input(description="Number of picture in output", ge=1, le=4, default=1),
        seed : int = Input(description="Seed for random generator", default=1334),
        supabase : bool = Input(description="Send picture to a supabase storage", default=False),
        supabase_key : str = Input(description="Supabase KEY use for your projet", default="eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJzdXBhYmFzZSIsInJlZiI6Im1lc2JnZnZyaHpsZ2JqaWZvd3FhIiwicm9sZSI6InNlcnZpY2Vfcm9sZSIsImlhdCI6MTY5OTgyNTkyMCwiZXhwIjoyMDE1NDAxOTIwfQ.nUaWB1bJdxcz2utI6T-bmsvytPJEaLE0Q7Iv_j_tX0Y"),
        supabase_url : str = Input(description="Supabase URL use for your projet", default="https://mesbgfvrhzlgbjifowqa.supabase.co")
    ) -> List[Path]:

        generator = torch.Generator("cuda").manual_seed(seed)
        Parameters = {
            "prompt": [Prompt] * number_picture,
            "num_inference_steps": num_inteference_steps,
            "width": width,
            "height": height,
            "generator": generator,
            "guidance_scale" : 7
        }

        if supabase:
            supabase_client : Client = create_client(supabase_url, supabase_key)

        if NSFW_Detector:
            print("NSFW Detector")
            self.pipe_nfsw_check.scheduler = DPMSolverMultistepScheduler.from_config(
                self.pipe_nfsw_check.scheduler.config
            )
            image = self.pipe_nfsw_check(
                **Parameters
            )

        else:
            print("No NSFW Detector")
            self.pipe_no_nsfw_check.scheduler = DPMSolverMultistepScheduler.from_config(
                self.pipe_no_nsfw_check.scheduler.config
            )

            image = self.pipe_no_nsfw_check(
                **Parameters
            )

        output = []
        print(image)
        print(f"la longueur de la liste est pour image.images {len(image.images)}, et pour image {len(image)}")
        for i,sample in enumerate(image.images):

            if NSFW_Detector : 
                print(f"L'image numéro {i} {'est NSFW' if image.nsfw_content_detected[i] else 'n''est pas NSFW'}")
                if image.nsfw_content_detected[i]:
                    print("NSFW Detected, retrying... with a beautiful cat !!")
                    new_image = self.pipe_nfsw_check(
                        prompt = "black cat",
                        num_inference_steps = num_inteference_steps,
                        width = width,
                        height = height,
                        generator = generator,
                        guidance_scale = 7
                    )
                    output_path = f"/tmp/out-{i}.png"
                    new_image.images[i].save(output_path)
                    output.append(Path(output_path))

                    if supabase:
                        with open(output_path, "rb") as f:
                            supabase_client.storage.from_('headshot').upload(
                                file=f,
                                path=f"out-{i}-{uuid.uuid4()}.png",
                                file_options={"content-type": "image/png"}
                            )

                    continue

            output_path = f"/tmp/out-{i}.png"
            sample.save(output_path)
            output.append(Path(output_path))

            if supabase:
                with open(output_path, "rb") as f:
                    supabase_client.storage.from_('headshot').upload(
                        file=f,
                        path=f"out-{i}-{uuid.uuid4()}.png",
                        file_options={"content-type": "image/png"}
                    )

        return output

Let’s check my other model !