class AveragePoolGrad(OpRun):
    """Gradient of AveragePool.

    Given the incoming gradient ``out`` of shape ``(N, C, *out_spatial)``,
    scatter-adds ``out[..., i, j] / prod(kernel_shape)`` back over the kernel
    window that produced output cell ``(i, j)``, yielding the gradient with
    respect to the pooling input.

    Only the 2-D case (NCHW) with ``auto_pad == "NOTSET"``,
    ``ceil_mode == 0`` and ``count_include_pad == 1`` is implemented.
    """

    def _run(
        self,
        out,
        auto_pad=None,
        ceil_mode=None,
        count_include_pad=None,
        kernel_shape=None,
        pads=None,
        strides=None,
    ):
        """Compute the AveragePool gradient.

        :param out: incoming gradient, shape ``(N, C, H_out, W_out)``
        :param auto_pad: must be ``"NOTSET"``
        :param ceil_mode: must be ``0``
        :param count_include_pad: must be ``1``
        :param kernel_shape: pooling window sizes per spatial axis
        :param pads: per-axis begin/end padding, ``[x1_b, x1_e, x2_b, x2_e]``
        :param strides: strides per spatial axis
        :return: one-element tuple with the gradient w.r.t. the pool input
        :raises AssertionError: when an attribute is missing or unsupported
        :raises NotImplementedError: for non-2-D inputs
        """
        # Asserts kept (not converted to explicit raises) so callers still
        # observe AssertionError for unsupported attribute combinations.
        assert auto_pad is not None, "auto_pad is None"
        assert ceil_mode is not None, "ceil_mode is None"
        assert count_include_pad is not None, "count_include_pad is None"
        assert kernel_shape is not None, "kernel_shape is None"
        assert pads is not None, "pads is None"
        assert strides is not None, "strides is None"
        assert auto_pad == "NOTSET", f"Not implemented for autopad={auto_pad!r}"
        assert ceil_mode == 0, f"Not implemented for ceil_mode={ceil_mode!r}"
        assert (
            count_include_pad == 1
        ), f"Not implemented for count_include_pad={count_include_pad!r}"

        # Recover the input (gradient) spatial shape from the output shape.
        # NOTE(review): ``+ sum(pads[...])`` looks suspect — inverting the
        # forward formula out = floor((in + pad_sum - kernel) / stride) + 1
        # would give ``- sum(pads[...])``.  Kept unchanged to preserve
        # behavior; confirm against the forward AveragePool before fixing.
        grad_shape = list(out.shape[:2])
        for i in range(len(kernel_shape)):
            d = (
                out.shape[i + 2] * strides[i]
                + kernel_shape[i]
                - 1
                + sum(pads[i * 2 : i * 2 + 2])
            )
            grad_shape.append(d)

        grad = np.zeros(tuple(grad_shape), dtype=out.dtype)
        # Every input cell inside a window receives 1/window_size of the
        # output gradient; count_include_pad == 1 means the divisor is the
        # full kernel size, padded cells included.
        scale = (1.0 / np.prod(kernel_shape)).astype(out.dtype)

        if len(grad_shape) == 4:  # 2-D pooling, NCHW layout
            for i in range(out.shape[2]):
                # Window rows in input coordinates, clipped to the tensor.
                t = max(i * strides[0] - pads[0], 0)
                b = min(i * strides[0] - pads[0] + kernel_shape[0], grad.shape[2])
                for j in range(out.shape[3]):
                    # Window columns, clipped likewise.
                    le = max(j * strides[1] - pads[2], 0)
                    ri = min(
                        j * strides[1] - pads[2] + kernel_shape[1],
                        grad.shape[3],
                    )
                    # Vectorized over batch and channel (the original looped
                    # over both in Python).  Each (batch, channel) slot is an
                    # independent elementwise add — no reduction — so the
                    # result is bit-identical to the scalar loops.
                    grad[:, :, t:b, le:ri] += (out[:, :, i, j] * scale)[
                        :, :, None, None
                    ]
        else:
            raise NotImplementedError(
                f"AveragePoolGrad is not implemented for shape={out.shape}."
            )
        return (grad.astype(out.dtype),)