import torch

import network
from modules import devices
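
# Background note (added): OFT (Orthogonal Finetuning) adapts a layer by applying a
# learned block-diagonal orthogonal rotation R to its pretrained weight, instead of
# adding a low-rank delta the way LoRA does; the rotation preserves the norms of and
# angles between the original weight rows, which helps retain the base model's behavior.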


class ModuleTypeOFT(network.ModuleType):
    def create_module(self, net: network.Network, weights: network.NetworkWeights):
        if all(x in weights.w for x in ["oft_blocks"]):
            return NetworkModuleOFT(net, weights)

        return None


# adapted from kohya's implementation https://github.com/kohya-ss/sd-scripts/blob/main/networks/oft.py
class NetworkModuleOFT(network.NetworkModule):
    def __init__(self, net: network.Network, weights: network.NetworkWeights):
        super().__init__(net, weights)

        self.oft_blocks = weights.w["oft_blocks"]
        self.alpha = weights.w["alpha"]
        self.dim = self.oft_blocks.shape[0]  # oft_blocks is (num_blocks, block_size, block_size)
        self.num_blocks = self.dim

        if "Linear" in self.sd_module.__class__.__name__:
            self.out_dim = self.sd_module.out_features
        elif "Conv" in self.sd_module.__class__.__name__:
            self.out_dim = self.sd_module.out_channels

        # cap on the norm of the skew-symmetric Q, enforced in get_weight
        self.constraint = self.alpha * self.out_dim
        self.block_size = self.out_dim // self.num_blocks

        self.org_module: list[torch.nn.Module] = [self.sd_module]
        self.org_weight = self.org_module[0].weight.to(self.org_module[0].weight.device, copy=True)
        # alternative kept from development: hold the copy on CPU to save VRAM
        #self.org_weight = self.org_module[0].weight.to(devices.cpu, copy=True)
        self.R = self.get_weight(self.oft_blocks)

        self.merged_weight = self.merge_weight()
        self.apply_to()
        self.merged = False

    def merge_weight(self):
        # rotate the output axis of the original weight by R; the result is
        # swapped into the module lazily by pre_forward_hook rather than being
        # loaded into the module here
        R = self.R.to(self.org_weight.device, dtype=self.org_weight.dtype)
        if self.org_weight.dim() == 4:
            # conv weight: (out_channels, in_channels, kh, kw)
            weight = torch.einsum("oihw, op -> pihw", self.org_weight, R)
        else:
            # linear weight: (out_features, in_features)
            weight = torch.einsum("oi, op -> pi", self.org_weight, R)
        # merging in place was tried and shelved:
        #org_sd = self.org_module[0].state_dict()
        #org_sd['weight'] = weight
        #self.org_module[0].load_state_dict(org_sd)
        return weight
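
    # A quick equivalence note for the einsum above (a sketch with made-up shapes,
    # not part of this module): "oi, op -> pi" contracts the shared output axis,
    # which is the same as multiplying by R transposed on the left.
    #
    #   W = torch.randn(8, 3)
    #   R = torch.randn(8, 8)
    #   assert torch.allclose(torch.einsum("oi, op -> pi", W, R), R.T @ W, atol=1e-5)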

    def replace_weight(self, new_weight):
        org_sd = self.org_module[0].state_dict()
        org_sd['weight'] = new_weight
        self.org_module[0].load_state_dict(org_sd)
        self.merged = True

    def restore_weight(self):
        org_sd = self.org_module[0].state_dict()
        org_sd['weight'] = self.org_weight
        self.org_module[0].load_state_dict(org_sd)
        self.merged = False

    # hook the original module's forward pass rather than replacing the module
    # TODO: how do we revert this to unload the weights?
    def apply_to(self):
        self.org_forward = self.org_module[0].forward
        #self.org_module[0].forward = self.forward
        self.org_module[0].register_forward_pre_hook(self.pre_forward_hook)
        self.org_module[0].register_forward_hook(self.forward_hook)
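
    # One possible answer to the unload question above (a sketch, not wired in):
    # both register_forward_pre_hook and register_forward_hook return a
    # torch.utils.hooks.RemovableHandle, so keeping the handles around allows
    # the hooks to be detached later. self.hook_handles is a hypothetical name.
    #
    #   self.hook_handles = [
    #       self.org_module[0].register_forward_pre_hook(self.pre_forward_hook),
    #       self.org_module[0].register_forward_hook(self.forward_hook),
    #   ]
    #   ...
    #   for handle in self.hook_handles:
    #       handle.remove()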

    def get_weight(self, oft_blocks, multiplier=None):
        constraint = self.constraint.to(oft_blocks.device, dtype=oft_blocks.dtype)

        # build a skew-symmetric Q from each block and clamp its norm to the constraint
        block_Q = oft_blocks - oft_blocks.transpose(1, 2)
        norm_Q = torch.norm(block_Q.flatten())
        new_norm_Q = torch.clamp(norm_Q, max=constraint)
        block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
        # Cayley transform: R = (I + Q)(I - Q)^-1 is orthogonal when Q is skew-symmetric
        m_I = torch.eye(self.block_size, device=oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1)
        block_R = torch.matmul(m_I + block_Q, (m_I - block_Q).inverse())
        #block_R_weighted = multiplier * block_R + (1 - multiplier) * m_I
        #R = torch.block_diag(*block_R_weighted)
        R = torch.block_diag(*block_R)

        return R
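
    # Sanity sketch for the Cayley transform above (hypothetical standalone code,
    # not executed by this module): a skew-symmetric Q always yields orthogonal R.
    #
    #   Q = torch.randn(4, 4)
    #   Q = Q - Q.T                                   # skew-symmetric: Q == -Q.T
    #   I = torch.eye(4)
    #   R = (I + Q) @ torch.linalg.inv(I - Q)
    #   assert torch.allclose(R @ R.T, I, atol=1e-5)  # R is orthogonal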

    def calc_updown(self, orig_weight):
        # the rotation is applied by swapping the module's weight in
        # pre_forward_hook, so no additive delta is contributed here
        updown = torch.zeros_like(orig_weight, device=orig_weight.device, dtype=orig_weight.dtype)
        output_shape = orig_weight.shape
        return self.finalize_updown(updown, orig_weight, output_shape)
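
    # Note (added, speculative): if an additive delta were desired instead of the
    # hook-based merge, it could roughly be the difference between the merged and
    # original weights (ignoring any multiplier handling inside finalize_updown):
    #
    #   updown = self.merged_weight.to(orig_weight.device, dtype=orig_weight.dtype) - orig_weight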

    def pre_forward_hook(self, module, input):
        # merge the rotated weight into the module just before its forward runs
        if not self.merged:
            self.replace_weight(self.merged_weight)

    def forward_hook(self, module, args, output):
        # the merged weight is intentionally left in place between calls;
        # restoring it after every forward would undo the merge each step
        if self.merged:
            pass
            #self.restore_weight()

    # earlier approach, kept for reference: rotate the module's output instead
    # of merging the rotation into the weight
    # def forward(self, x, y=None):
    #     x = self.org_forward(x)
    #     if self.multiplier() == 0.0:
    #         return x
    #
    #     # calculating R here is excruciatingly slow
    #     #R = self.get_weight().to(x.device, dtype=x.dtype)
    #     R = self.R.to(x.device, dtype=x.dtype)
    #
    #     if x.dim() == 4:
    #         x = x.permute(0, 2, 3, 1)
    #         x = torch.matmul(x, R)
    #         x = x.permute(0, 3, 1, 2)
    #     else:
    #         x = torch.matmul(x, R)
    #     return x
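
    # Equivalence note (added): for a bias-free linear layer y = x @ W.T, rotating
    # the output gives y @ R = x @ W.T @ R, while running the layer with the merged
    # weight R.T @ W gives x @ (R.T @ W).T = x @ W.T @ R -- the same result, with
    # the matmul moved out of the per-call hot path. (A bias term would need to be
    # rotated as well for exact equivalence.)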