Updated code for legibility

This commit is contained in:
Fampai 2022-10-09 04:32:40 -04:00 committed by AUTOMATIC1111
parent ec2bd9be75
commit ad3ae44108

View File

@ -284,8 +284,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
tmp = -opts.CLIP_stop_at_last_layers
outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=tmp)

if tmp < -1:
    z = outputs.hidden_states[tmp]
    z = self.wrapped.transformer.text_model.final_layer_norm(z)
else:
    z = outputs.last_hidden_state

# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]