Imports
import os
import cv2
import numpy as np
from PIL import Image
from torchvision import transforms
import torch
import torch.nn.functional as F
from model_data.conv_model import convnext_small as create_model
import captum.attr as attr
from lime import lime_image
from lime.wrappers.scikit_image import SegmentationAlgorithm
from skimage import segmentation
Paths
data2_path = ...  # root data directory; the actual value is not shown in the original
weight_path = os.path.join(data2_path, "output_weights")
The model class
class model:
    def __init__(self):
        self.checkpoint = os.path.join(weight_path, "flower/process_data_flower_72_0.986.pth")
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    def load(self, dir_path):
        self.dir_path = dir_path
        self.model = create_model(num_classes=5)
        self.model.load_state_dict(torch.load(self.checkpoint, map_location=self.device))
        self.model.to(self.device)
        self.integrated_gradients = attr.IntegratedGradients(self.model)
        self.guided_backprop = attr.GuidedBackprop(self.model)
        self.model.eval()

    def multiScaleSharpen(self, src, Radius):
        ...  # body omitted in this post

    def get_integrated_gradients(self, img, predict_cla):
        ...  # body omitted in this post

    def get_smoothgrad(self, input_image, predict_cla, num_samples=20, stdev_spread=0.2, clip_range=(0, 1)):
        ...  # shown in full below

    def get_guided_backprop(self, img, predict_cla):
        ...  # body omitted in this post

    def predict(self, input_image):
        ...  # shown in full below
        return result


if __name__ == "__main__":
    result = model()
    result.load(os.getcwd())
    for i in range(0, 5):
        img = os.path.join(os.getcwd(), "aaa/image/flower/{}.jpg".format(i))
        img = cv2.imread(img)
        print("Predicted result for class {}: {}".format(i, result.predict(img)))
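The bodies of multiScaleSharpen, get_integrated_gradients, and get_guided_backprop are not shown in this post. As context only, here is a minimal sketch (my own hypothetical helper, not the author's implementation) of how the Captum objects created in load() are typically called:

import torch
import captum.attr as attr

def captum_attributions_sketch(model, x, target):
    # Integrated Gradients: interpolate from a baseline (zeros by default)
    # to the input and accumulate gradients along the path.
    ig = attr.IntegratedGradients(model)
    ig_attr = ig.attribute(x, target=target, n_steps=50)
    # Guided Backprop: backprop in which negative gradients at ReLUs are zeroed.
    gbp = attr.GuidedBackprop(model)
    gbp_attr = gbp.attribute(x, target=target)
    return ig_attr, gbp_attr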
The predict function
def predict(self, input_image):
    # Sharpen the BGR input, then convert to RGB and wrap it as a PIL image
    input_image = self.multiScaleSharpen(input_image, 5)
    input_image = cv2.cvtColor(input_image, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(input_image)

    # Standard ImageNet-style preprocessing
    img_size = 224
    data_transform = transforms.Compose(
        [transforms.Resize(int(img_size * 1.14)),
         transforms.CenterCrop(img_size),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    img = data_transform(image)
    img = torch.unsqueeze(img, dim=0)

    predict_s = []
    with torch.no_grad():
        output = torch.squeeze(self.model(img.to(self.device))).cpu()
        predict = torch.softmax(output, dim=0)
        predict_scores = predict.tolist()
        predict_cla = torch.argmax(predict).item()

    # Generate and save the SmoothGrad explanation for the predicted class
    self.get_smoothgrad(img.cpu().numpy(), predict_cla)

    result = predict_cla
    return result
The get_smoothgrad function
def get_smoothgrad(self, input_image, predict_cla, num_samples=20, stdev_spread=0.2, clip_range=(0, 1)):
    input_image = torch.from_numpy(input_image).to(self.device)
    input_image.requires_grad = True
    self.model.zero_grad()

    total_gradients = []
    for _ in range(num_samples):
        noisy_image = input_image + torch.randn_like(input_image) * stdev_spread
        noisy_image = torch.clamp(noisy_image, *clip_range)
        output = self.model(noisy_image)
        output = F.softmax(output, dim=1)
        output[:, predict_cla].mean().backward()
        gradients = input_image.grad.detach().cpu().numpy()
        total_gradients.append(gradients)

    # Average the per-sample gradients and min-max normalize to [0, 1]
    smooth_grad = np.mean(total_gradients, axis=0)
    smooth_grad = (smooth_grad - np.min(smooth_grad)) / (np.max(smooth_grad) - np.min(smooth_grad))

    # CHW -> HWC, scale to [0, 255], convert to uint8 for OpenCV
    smoothgrad_img = np.transpose(smooth_grad.squeeze(), (1, 2, 0))
    smoothgrad_img = cv2.convertScaleAbs(smoothgrad_img * 255)

    save_path = os.path.join(data2_path, "aaa/flower/explanations-test", "guiyi-mean-20")
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    cv2.imwrite(os.path.join(save_path, "processed_{}.jpg".format(predict_cla)), smoothgrad_img)
Convert the input image to a PyTorch tensor and move it to the target device (e.g. a GPU).

input_image = torch.from_numpy(input_image).to(self.device)

Set the input image's requires_grad attribute to True so that gradients can be computed with respect to it.

input_image.requires_grad = True
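As a quick, self-contained illustration of these two steps (toy array and shapes of my own choosing, not from the original code): a NumPy array converted this way becomes a leaf tensor, and setting requires_grad=True is what allows backward() to later write a gradient into its .grad attribute.

import numpy as np
import torch

arr = np.zeros((1, 3, 8, 8), dtype=np.float32)          # toy stand-in for the preprocessed image
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
t = torch.from_numpy(arr).to(device)                     # NumPy array -> tensor on the device
t.requires_grad = True                                   # enable gradient tracking w.r.t. the input
print(t.is_leaf, t.requires_grad)                        # True True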
Zero the model's gradients in preparation for backpropagation.

self.model.zero_grad()

Create a list that will hold the gradient computed in each iteration.

total_gradients = []

Enter the sampling loop, which runs once per noisy sample, i.e. num_samples times (20 with the defaults used here).

for _ in range(num_samples):
Add Gaussian noise to the input image, scaled by stdev_spread.

noisy_image = input_image + torch.randn_like(input_image) * stdev_spread

Clamp the noisy image's pixel values to the specified range, e.g. (0, 1). Passing clip_range to torch.clamp() keeps the generated noisy image's values between 0 and 1, as expected by the rest of the pipeline and the model.

noisy_image = torch.clamp(noisy_image, *clip_range)
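A minimal sketch of just these two operations on a toy tensor (my own example, assuming stdev_spread = 0.2 and clip_range = (0, 1) as in the defaults above):

import torch

x = torch.rand(1, 3, 224, 224)                           # stand-in for the preprocessed image
noisy = x + torch.randn_like(x) * 0.2                    # Gaussian noise with std 0.2
noisy = torch.clamp(noisy, 0, 1)                         # keep values in [0, 1]
print(noisy.min().item() >= 0, noisy.max().item() <= 1)  # True True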
Run the model on the noisy image.

output = self.model(noisy_image)

Apply softmax to the raw prediction scores so they become class probabilities.

output = F.softmax(output, dim=1)
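For intuition, a tiny standalone example (made-up logits for 1 image and 5 classes): softmax along dim=1 turns the scores into probabilities that sum to 1 per row.

import torch
import torch.nn.functional as F

logits = torch.tensor([[1.0, 2.0, 0.5, -1.0, 0.0]])  # shape (batch=1, classes=5)
probs = F.softmax(logits, dim=1)
print(probs.sum(dim=1))                              # tensor([1.])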
Backpropagate, using the mean of the softmax scores for the predicted class as the loss. When backward() is called, the resulting gradient is stored in input_image's .grad attribute, which holds the gradient of this loss with respect to the input image.

output[:, predict_cla].mean().backward()
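A minimal, self-contained illustration of this mechanism (toy tensor of my own, not the author's code): backward() writes into .grad, and repeated backward() calls accumulate into it unless the gradient is reset.

import torch

x = torch.ones(3, requires_grad=True)
(x * 2.0).sum().backward()
print(x.grad)   # tensor([2., 2., 2.])
(x * 2.0).sum().backward()
print(x.grad)   # tensor([4., 4., 4.]) -- gradients accumulate across calls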
Extract the input image's gradient and convert it to a NumPy array. The .detach() call separates the gradient tensor from the computation graph, so that subsequent computations do not affect the values captured here and only the current gradient result is kept. What is stored is the gradient produced by the backward() call on the previous line; it does not include gradient updates from any later computation involving the model or the loss.

gradients = input_image.grad.detach().cpu().numpy()
Average all the collected gradients to obtain the SmoothGrad map.

smooth_grad = np.mean(total_gradients, axis=0)

Min-max normalize the result to [0, 1].

smooth_grad = (smooth_grad - np.min(smooth_grad)) / (np.max(smooth_grad) - np.min(smooth_grad))
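A toy NumPy example of these two lines (made-up 2x2 "gradients", not real model output), showing that the normalized values land in [0, 1]:

import numpy as np

grads = [np.array([[0.0, 2.0], [4.0, 6.0]]),
         np.array([[2.0, 4.0], [6.0, 8.0]])]
mean_grad = np.mean(grads, axis=0)                                           # [[1., 3.], [5., 7.]]
norm = (mean_grad - np.min(mean_grad)) / (np.max(mean_grad) - np.min(mean_grad))
print(norm)                                                                  # [[0. 0.333...] [0.666... 1.]]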
Rearrange the dimensions from (C, H, W) to (H, W, C) so the array matches OpenCV's image layout.

smoothgrad_img = np.transpose(smooth_grad.squeeze(), (1, 2, 0))

Multiplying smoothgrad_img by 255 rescales the pixel values from [0, 1] to [0, 255]; cv2.convertScaleAbs() then converts the scaled result to unsigned 8-bit integers, taking the absolute value, rounding, and saturating so every pixel ends up in [0, 255].

smoothgrad_img = cv2.convertScaleAbs(smoothgrad_img * 255)
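A small self-contained sketch of these two conversion steps on a random map (my own example values):

import numpy as np
import cv2

chw = np.random.rand(3, 4, 4).astype(np.float32)   # toy (C, H, W) map in [0, 1]
hwc = np.transpose(chw, (1, 2, 0))                  # (H, W, C) = (4, 4, 3) for OpenCV
img_u8 = cv2.convertScaleAbs(hwc * 255)             # rounds, takes abs, saturates to uint8
print(hwc.shape, img_u8.dtype)                      # (4, 4, 3) uint8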
Save the image.

save_path = os.path.join(data2_path, "aaa/flower/explanations-test", "guiyi-mean-20")
if not os.path.exists(save_path):
    os.makedirs(save_path)
cv2.imwrite(os.path.join(save_path, "processed_{}.jpg".format(predict_cla)), smoothgrad_img)
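As a side note, the existence check and directory creation can be folded into a single call with exist_ok=True, which also avoids a race if two runs create the folder at the same time (the path below is only illustrative):

import os

save_path = "aaa/flower/explanations-test/guiyi-mean-20"  # illustrative path
os.makedirs(save_path, exist_ok=True)                     # create only if missing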