mirror of https://github.com/comfyanonymous/ComfyUI.git synced 2025-08-02 23:14:49 +08:00

separates out arg parser and imports args

This commit is contained in:
EllangoK
2023-04-05 23:41:23 -04:00
parent dd29966f8a
commit e5e587b1c0
4 changed files with 88 additions and 84 deletions

main.py (27 changed lines)

@@ -1,37 +1,14 @@
-import argparse
 import asyncio
 import os
 import shutil
 import sys
 import threading
+from comfy.cli_args import args
 
 if os.name == "nt":
     import logging
     logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage())
 
 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Script Arguments")
-    parser.add_argument("--listen", type=str, default="127.0.0.1", help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.")
-    parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
-    parser.add_argument("--extra-model-paths-config", type=str, default=None, help="Load an extra_model_paths.yaml file.")
-    parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
-    parser.add_argument("--dont-upcast-attention", action="store_true", help="Disable upcasting of attention. Can boost speed but increase the chances of black images.")
-    parser.add_argument("--use-split-cross-attention", action="store_true", help="Use the split cross attention optimization instead of the sub-quadratic one. Ignored when xformers is used.")
-    parser.add_argument("--use-pytorch-cross-attention", action="store_true", help="Use the new pytorch 2.0 cross attention function.")
-    parser.add_argument("--disable-xformers", action="store_true", help="Disable xformers.")
-    parser.add_argument("--cuda-device", type=int, default=None, help="Set the id of the cuda device this instance will use.")
-    parser.add_argument("--highvram", action="store_true", help="By default models will be unloaded to CPU memory after being used. This option keeps them in GPU memory.")
-    parser.add_argument("--normalvram", action="store_true", help="Used to force normal vram use if lowvram gets automatically enabled.")
-    parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
-    parser.add_argument("--novram", action="store_true", help="When lowvram isn't enough.")
-    parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
-    parser.add_argument("--dont-print-server", action="store_true", help="Don't print server output.")
-    parser.add_argument("--quick-test-for-ci", action="store_true", help="Quick test for CI.")
-    parser.add_argument("--windows-standalone-build", action="store_true", help="Windows standalone build.")
-    args = parser.parse_args()
 
     if args.dont_upcast_attention:
         print("disabling upcasting of attention")
         os.environ['ATTN_PRECISION'] = "fp16"
@@ -121,7 +98,7 @@ if __name__ == "__main__":
     if args.output_directory:
         output_dir = os.path.abspath(args.output_directory)
-        print("setting output directory to:", output_dir)
+        print(f"Setting output directory to: {output_dir}")
         folder_paths.set_output_directory(output_dir)
 
     port = args.port
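
Only the main.py hunks are reproduced above; the comfy/cli_args.py module that main.py now imports from is among the other changed files but is not shown. A minimal sketch of such a module, assuming it simply hosts the argparse definitions removed from main.py (argument list abbreviated), could look like this:

# comfy/cli_args.py -- hypothetical sketch, not the commit's actual file.
# Assumes the module just hosts the argparse setup removed from main.py above.
import argparse

parser = argparse.ArgumentParser(description="Script Arguments")

parser.add_argument("--listen", type=str, default="127.0.0.1",
                    help="Listen on IP or 0.0.0.0 if none given so the UI can be accessed from other computers.")
parser.add_argument("--port", type=int, default=8188, help="Set the listen port.")
parser.add_argument("--output-directory", type=str, default=None, help="Set the ComfyUI output directory.")
parser.add_argument("--lowvram", action="store_true", help="Split the unet in parts to use less vram.")
parser.add_argument("--cpu", action="store_true", help="To use the CPU for everything (slow).")
# ... the remaining flags removed from main.py would follow the same pattern ...

# Parsed once at import time, so every `from comfy.cli_args import args`
# sees the same parsed namespace.
args = parser.parse_args()

Because the namespace is built when the module is first imported, main.py (and any other module) can drop its own parser and simply read args.port, args.output_directory, and so on, which is what the updated hunks above rely on.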