import re

import torch
import gradio as gr
from fastapi import FastAPI

import lora
import extra_networks_lora
import ui_extra_networks_lora

from modules import script_callbacks, ui_extra_networks, extra_networks, shared

def unload():
    """Undo the lora monkey-patching: put back the original torch.nn methods.

    Restores the `forward` and `_load_from_state_dict` methods that were stashed
    on `torch.nn` (as `<Layer>_forward_before_lora` etc.) before lora hijacked them.
    """
    for layer in (torch.nn.Linear, torch.nn.Conv2d, torch.nn.MultiheadAttention):
        saved_prefix = layer.__name__
        layer.forward = getattr(torch.nn, f'{saved_prefix}_forward_before_lora')
        layer._load_from_state_dict = getattr(torch.nn, f'{saved_prefix}_load_state_dict_before_lora')

def before_ui():
    """Hook the Lora extension into the UI before it is built.

    Registers the extra-networks browser page and the handler that activates
    `<lora:...>` references found in prompts.
    """
    page = ui_extra_networks_lora.ExtraNetworksPageLora()
    ui_extra_networks.register_page(page)

    network = extra_networks_lora.ExtraNetworkLora()
    extra_networks.register_extra_network(network)

# Hijack the torch.nn layer classes that lora modifies. For each class the
# pristine `forward` / `_load_from_state_dict` methods are stashed on `torch.nn`
# first — guarded by hasattr so that a script reload does not overwrite the
# saved originals with already-hijacked versions — and then replaced with the
# lora-aware implementations from the `lora` module.
for _layer in (torch.nn.Linear, torch.nn.Conv2d, torch.nn.MultiheadAttention):
    _name = _layer.__name__

    if not hasattr(torch.nn, f'{_name}_forward_before_lora'):
        setattr(torch.nn, f'{_name}_forward_before_lora', _layer.forward)
    if not hasattr(torch.nn, f'{_name}_load_state_dict_before_lora'):
        setattr(torch.nn, f'{_name}_load_state_dict_before_lora', _layer._load_from_state_dict)

    _layer.forward = getattr(lora, f'lora_{_name}_forward')
    _layer._load_from_state_dict = getattr(lora, f'lora_{_name}_load_state_dict')

del _layer, _name

# Wire the extension into the webui lifecycle: name assignment after a model
# loads, clean un-hijacking when scripts unload, UI page registration, and
# infotext handling for pasted generation parameters.
for _register, _callback in (
    (script_callbacks.on_model_loaded, lora.assign_lora_names_to_compvis_modules),
    (script_callbacks.on_script_unloaded, unload),
    (script_callbacks.on_before_ui, before_ui),
    (script_callbacks.on_infotext_pasted, lora.infotext_pasted),
):
    _register(_callback)

del _register, _callback

# Settings shown in the "Extra Networks" section of the webui options.
_extra_networks_options = {
    "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras),
    "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
    "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
}
shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), _extra_networks_options))

# Settings shown in the "Compatibility" section.
_compatibility_options = {
    "lora_functional": shared.OptionInfo(False, "Lora: use old method that takes longer when you have multiple Loras active and produces same results as kohya-ss/sd-webui-additional-networks extension"),
}
shared.options_templates.update(shared.options_section(('compatibility', "Compatibility"), _compatibility_options))

def create_lora_json(obj: lora.LoraOnDisk):
    """Flatten a LoraOnDisk record into the JSON-friendly dict served by the API."""
    return dict(
        name=obj.name,
        alias=obj.alias,
        path=obj.filename,
        metadata=obj.metadata,
    )

def api_loras(_: gr.Blocks, app: FastAPI):
    """Mount the lora REST endpoints onto the webui's FastAPI application."""

    @app.get("/sdapi/v1/loras")
    async def get_loras():
        # Serialize every currently-known lora (name/alias/path/metadata).
        return list(map(create_lora_json, lora.available_loras.values()))

    @app.post("/sdapi/v1/refresh-loras")
    async def refresh_loras():
        # Rescan the lora directory so newly added files become available.
        return lora.list_available_loras()


script_callbacks.on_app_started(api_loras)

# Matches the opening of a lora prompt token, capturing the name/alias part.
re_lora = re.compile("<lora:([^:]+):")


def infotext_pasted(infotext, d):
    """Rewrite `<lora:NAME:` tokens in a pasted prompt via the "Lora hashes" field.

    If the pasted parameters carry a "Lora hashes" entry, each lora referenced in
    the prompt is looked up by its short hash and replaced with the alias of the
    matching lora found on this machine; unknown names/hashes are left untouched.
    Mutates `d["Prompt"]` in place.
    """
    raw = d.get("Lora hashes")
    if not raw:
        return

    # "name1: hash1, name2: hash2" -> {name: hash}
    parsed = [chunk.strip().split(':', 1) for chunk in raw.split(",")]
    known = {entry[0].strip().replace(",", ""): entry[1].strip() for entry in parsed}

    def replace_token(match):
        shorthash = known.get(match.group(1))
        if shorthash is None:
            return match.group(0)

        entry = lora.available_lora_hash_lookup.get(shorthash)
        if entry is None:
            return match.group(0)

        return f'<lora:{entry.get_alias()}:'

    d["Prompt"] = re_lora.sub(replace_token, d["Prompt"])

# Second infotext handler (in addition to lora.infotext_pasted registered above):
# rewrites pasted <lora:...> prompt tokens using the "Lora hashes" field.
script_callbacks.on_infotext_pasted(infotext_pasted)