parser.add_argument("--config",type=str,default=os.path.join(sd_path,"configs/stable-diffusion/v1-inference.yaml"),help="path to config which constructs model",)
parser.add_argument("--ckpt",type=str,default=sd_model_file,help="path to checkpoint of stable diffusion model; this checkpoint will be added to the list of checkpoints and loaded by default if you don't have a checkpoint selected in settings",)
parser.add_argument("--ckpt-dir",type=str,default=os.path.join(script_path,'models'),help="path to directory with stable diffusion checkpoints",)
parser.add_argument("--no-progressbar-hiding",action='store_true',help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--medvram",action='store_true',help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram",action='store_true',help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
parser.add_argument("--always-batch-cond-uncond",action='store_true',help="disables cond/uncond batching that is enabled to save memory with --medvram or --lowvram")
parser.add_argument("--precision",type=str,help="evaluate at this precision",choices=["full","autocast"],default="autocast")
parser.add_argument("--share",action='store_true',help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
parser.add_argument("--opt-split-attention",action='store_true',help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--opt-split-attention-v1",action='store_true',help="enable older version of split attention optimization that does not consume all the VRAM it can find")
parser.add_argument("--port",type=int,help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available",default=None)
parser.add_argument("--gradio-auth",type=str,help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"',default=None)
parser.add_argument("--use-textbox-seed",action='store_true',help="use textbox for seeds in UI (no up/down, but possible to input long seeds)",default=False)
"enable_pnginfo":OptionInfo(True,"Save text information about generation parameters as chunks to png files"),
"add_model_hash_to_info":OptionInfo(False,"Add model hash to generation information"),
"save_txt":OptionInfo(False,"Create a text file next to every image with generation parameters."),
"ESRGAN_tile":OptionInfo(192,"Tile size for ESRGAN upscalers. 0 = no tiling.",gr.Slider,{"minimum":0,"maximum":512,"step":16}),
"ESRGAN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"realesrgan_enabled_models":OptionInfo(["Real-ESRGAN 4x plus","Real-ESRGAN 4x plus anime 6B"],"Select which RealESRGAN models to show in the web UI. (Requires restart)",gr.CheckboxGroup,lambda:{"choices":realesrgan_models_names()}),
"SWIN_tile":OptionInfo(192,"Tile size for all SwinIR.",gr.Slider,{"minimum":16,"maximum":512,"step":16}),
"SWIN_tile_overlap":OptionInfo(8,"Tile overlap, in pixels for SwinIR. Low values = visible seam.",gr.Slider,{"minimum":0,"maximum":48,"step":1}),
"random_artist_categories":OptionInfo([],"Allowed categories for random artists selection when using the Roll button",gr.CheckboxGroup,{"choices":artist_db.categories()}),
"upscaler_for_hires_fix":OptionInfo(None,"Upscaler for highres. fix",gr.Radio,lambda:{"choices":[x.nameforxinsd_upscalers]}),
"show_progress_every_n_steps":OptionInfo(0,"Show show image creation progress every N sampling steps. Set 0 to disable.",gr.Slider,{"minimum":0,"maximum":32,"step":1}),
"multiple_tqdm":OptionInfo(True,"Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
"memmon_poll_rate":OptionInfo(8,"VRAM usage polls per second during generation. Set to 0 to disable.",gr.Slider,{"minimum":0,"maximum":40,"step":1}),
"img2img_color_correction":OptionInfo(False,"Apply color correction to img2img results to match original colors."),
"img2img_fix_steps":OptionInfo(False,"With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization":OptionInfo(False,"Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
"font":OptionInfo("","Font for image grids that have text"),
"enable_emphasis":OptionInfo(True,"Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"enable_batch_seeds":OptionInfo(True,"Make K-diffusion samplers produce same images in a batch as when making a single image"),