I have a simple network in Keras, and I define a custom layer that performs some operations on the input tensor and then returns it to the network. But when I try to use it, it raises the error below, saying that the input was not fed, whereas I thought the fit function is what feeds the network. Could you help me with this problem? I have not found a suitable answer for it. I also put my code here. Thanks.
# imports assumed from the usage below (the original post omitted them): Kr is keras, K is keras.backend
import numpy as np
import keras as Kr
from keras import backend as K
from keras.layers import Layer, Input, Conv2D
from keras.models import Model
from keras.optimizers import SGD
from keras.datasets import mnist
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard

def C(u):
    if u == 0:
        return 1. / np.sqrt(2.)
    else:
        return 1.

def DCT(a, b):
    # 8x8 2-D DCT-II: accumulates the coefficients of block a into b
    for u in range(8):
        for v in range(8):
            for x in range(8):
                for y in range(8):
                    b[u, v] = b[u, v] + 0.25 * C(u) * C(v) * a[x, y] * np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16)

def IDCT(a, b):
    # inverse transform: reconstructs the 8x8 block b from coefficients a
    for u in range(8):
        for v in range(8):
            for x in range(8):
                for y in range(8):
                    b[x, y] = b[x, y] + 0.25 * C(u) * C(v) * a[u, v] * np.cos((2 * x + 1) * u * np.pi / 16) * np.cos((2 * y + 1) * v * np.pi / 16)

def quntize_mask(window_size: int, keep_count: int):
    # 0/1 mask that keeps the first keep_count coefficients in JPEG zig-zag order
    mask = np.zeros((window_size, window_size), dtype=np.uint8)
    index_order = sorted(((x, y) for x in range(window_size) for y in range(window_size)),
                         key=lambda p: (p[0] + p[1], -p[1] if (p[0] + p[1]) % 2 else p[1]))
    for i, j in index_order[0:keep_count]:
        mask[i, j] = 1
    return mask

def slicAndJpeg(img):
    # DCT -> mask -> IDCT on every 8x8 block of the (evaluated) image tensor
    for i in range(int(img.shape[1].value / 8)):
        for j in range(int(img.shape[2].value / 8)):
            temp = img[:, i*8:i*8+8, j*8:j*8+8]
            tempb = np.zeros((8, 8))
            DCT(temp, tempb)
            mask = quntize_mask(8, 9)
            qunz = Kr.layers.multiply(mask, tempb)
            tempc = K.zeros((8, 8))
            IDCT(qunz, tempc)
            img[:, i*8:i*8+8, j*8:j*8+8] = tempc

class JPEGLayer(Layer):
    def __init__(self, **kwargs):
        super(JPEGLayer, self).__init__(**kwargs)
        self.supports_masking = True

    def call(self, noised_image, training=True):
        def noise():
            # noised_image = noised_and_cover
            # pad the image so that we can do dct on 8x8 blocks
            pad_height = (8 - noised_image.shape[1] % 8) % 8
            pad_width = (8 - noised_image.shape[2] % 8) % 8
            noised_image_pad = Kr.layers.ZeroPadding2D(padding=((pad_width, 0), (pad_height, 0)))(noised_image)
            slicAndJpeg(K.eval(noised_image_pad))
            # un-pad
            noised_and_cover = noised_image_pad[:, :noised_image_pad.shape[1]-pad_height, :noised_image_pad.shape[2]-pad_width]
            return noised_and_cover
        return noise()
#-----------------building w train---------------------------------------------
wt_random=np.random.randint(2, size=(49999,4,4))
w_expand=wt_random.astype(np.float32)
wv_random=np.random.randint(2, size=(9999,4,4))
wv_expand=wv_random.astype(np.float32)
x,y,z=w_expand.shape
w_expand=w_expand.reshape((x,y,z,1))
x,y,z=wv_expand.shape
wv_expand=wv_expand.reshape((x,y,z,1))
#-----------------building w test---------------------------------------------
w_test = np.random.randint(2,size=(1,4,4))
w_test=w_test.astype(np.float32)
w_test=w_test.reshape((1,4,4,1))
#-----------------------encoder------------------------------------------------
#------------------------------------------------------------------------------
image = Input((28, 28, 1))
conv1 = Conv2D(64, (5, 5),activation='relu',padding='same', name='convl1e')(image)
wtm=Input((4,4,1))
#--------------------------------------------------------------
wpad=Kr.layers.Lambda(lambda xy: xy[0] + Kr.backend.spatial_2d_padding(xy[1], padding=((0, 24), (0, 24))))
encoded_merged=wpad([conv1,wtm])
#-----------------------decoder------------------------------------------------
#------------------------------------------------------------------------------
decoded = Conv2D(1, (5, 5),activation='relu', padding='same', name='decoder_output')(encoded_merged)
model=Model(inputs=[image,wtm],outputs=decoded)
model.summary()
decoded_noise=JPEGLayer()(decoded)#16
#----------------------w extraction------------------------------------
convw1 = Conv2D(64, (5,5),activation='relu' , name='conl1w')(decoded_noise)#24
convw2 = Conv2D(64, (5,5),activation='relu' , name='conl2w')(convw1)#20
#Avw1=AveragePooling2D(pool_size=(2,2))(convw2)
convw3 = Conv2D(64, (5,5),activation='relu' ,name='conl3w')(convw2)#16
convw4 = Conv2D(64, (5,5), activation='relu' ,name='conl4w')(convw3)#12
#Avw2=AveragePooling2D(pool_size=(2,2))(convw4)
convw5 = Conv2D(64, (5,5), activation='relu' ,name='conl5w')(convw4)#8
convw6 = Conv2D(64, (5,5), activation='relu' ,name='conl6w')(convw5)#4
pred_w = Conv2D(1, (1, 1),activation='relu' ,padding='same', name='reconstructed_W')(convw6)
model1=Model(inputs=[image,wtm],outputs=[decoded,pred_w])
model1.summary()
#----------------------training the model--------------------------------------
#------------------------------------------------------------------------------
#----------------------Data preparation----------------------------------------
(x_train, _), (x_test, _) = mnist.load_data()
x_validation=x_train[1:10000,:,:]
x_train=x_train[10001:60000,:,:]
#
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_validation = x_validation.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1)) # adapt this if using `channels_first` image data format
x_validation = np.reshape(x_validation, (len(x_validation), 28, 28, 1))
#---------------------compile and train the model------------------------------
opt=SGD(momentum=0.99,lr=0.0001)
model1.compile(optimizer='adam', loss={'imageprim':'mse','wprim':'binary_crossentropy'}, loss_weights={'imageprim': 0.5, 'wprim': 1.0},metrics=['mae'])
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=40)
#rlrp = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=20, min_delta=1E-4, verbose=1)
mc = ModelCheckpoint('sendAct.h5', monitor='val_loss', mode='min', verbose=1, save_best_only=True)
history=model1.fit([x_train,w_expand], [x_train,w_expand],
epochs=4000,
batch_size=32,
validation_data=([x_validation,wv_expand], [x_validation,wv_expand]),
callbacks=[TensorBoard(log_dir='/home/jamalm8/tensorboardGNWLoss/', histogram_freq=0, write_graph=False),es,mc])
model1.summary()
Traceback (most recent call last):
  File "", line 124, in <module>
    decoded_noise=JPEGLayer()(decoded)#16
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\engine\base_layer.py", line 457, in __call__
    output = self.call(inputs, **kwargs)
  File "", line 94, in call
    return noise()
  File "", line 88, in noise
    slicAndJpeg(K.eval(noised_image_pad))
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py", line 673, in eval
    return to_dense(x).eval(session=get_session())
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 713, in eval
    return _eval_using_default_session(self, feed_dict, self.graph, session)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\framework\ops.py", line 5157, in _eval_using_default_session
    return session.run(tensors, feed_dict)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 929, in run
    run_metadata_ptr)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1152, in _run
    feed_dict_tensor, options, run_metadata)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1328, in _do_run
    run_metadata)
  File "D:\software\Anaconda3\envs\py36\lib\site-packages\tensorflow\python\client\session.py", line 1348, in _do_call
    raise type(e)(node_def, op, message)
InvalidArgumentError: You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,28,28,1]
	 [[node input_1 (defined at D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py:517) = Placeholder[dtype=DT_FLOAT, shape=[?,28,28,1], _device="/job:localhost/replica:0/task:0/device:GPU:0"]]]
	 [[{{node jpeg_layer_1/zero_padding2d_1/Pad/_9}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_39_jpeg_layer_1/zero_padding2d_1/Pad", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]

InvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'input_1' with dtype float and shape [?,28,28,1]
	 [[node input_1 (defined at D:\software\Anaconda3\envs\py36\lib\site-packages\keras\backend\tensorflow_backend.py:517) = Placeholder[dtype=DT_FLOAT, shape=[?,28,28,1], _device="/job:localhost/replica:0/task:0/device:GPU:0"]]]
	 [[{{node jpeg_layer_1/zero_padding2d_1/Pad/_9}} = _Recv[client_terminated=false, recv_device="/job:localhost/replica:0/task:0/device:CPU:0", send_device="/job:localhost/replica:0/task:0/device:GPU:0", send_device_incarnation=1, tensor_name="edge_39_jpeg_layer_1/zero_padding2d_1/Pad", tensor_type=DT_FLOAT, _device="/job:localhost/replica:0/task:0/device:CPU:0"]]]
This is caused by the line

slicAndJpeg(K.eval(noised_image_pad))

in your JPEGLayer class. Essentially, you are trying to evaluate a tensor by calling K.eval without feeding it any data. You cannot evaluate an empty tensor, right? This can be fixed by removing the noise() function entirely and doing the padding/slicing and the other work in preprocessing instead.
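To see why the error points at input_1, here is a minimal illustration (my own example, not from the original answer): under the TF 1.x Keras backend, K.eval runs the graph with an empty feed_dict, so evaluating any tensor that depends on a placeholder, which is exactly what Input creates, fails the same way:

from keras import backend as K

x = K.placeholder(shape=(None, 28, 28, 1))  # same kind of node that Input((28, 28, 1)) creates
y = x + 1.
K.eval(y)  # raises InvalidArgumentError: You must feed a value for placeholder tensor ...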
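And here is a minimal sketch of the suggested fix, assuming the JPEG step can be moved out of the model entirely and applied to the input arrays (jpeg_preprocess and the matrix D are illustrative names introduced here; quntize_mask is the function from the post above). Everything runs in plain NumPy before model.fit, so no K.eval is ever needed:

import numpy as np

# orthonormal 8x8 DCT-II matrix: D @ block @ D.T gives the same coefficients
# as the quadruple loop in DCT (the 0.25 * C(u) * C(v) scaling is folded into D)
D = np.array([[np.sqrt((1. if u == 0 else 2.) / 8.) * np.cos((2 * x + 1) * u * np.pi / 16)
               for x in range(8)] for u in range(8)])

def jpeg_preprocess(images, keep_count=9):
    # images: (N, H, W, 1) float array with H and W divisible by 8
    mask = quntize_mask(8, keep_count)
    out = images.copy()
    _, h, w, _ = images.shape
    for i in range(h // 8):
        for j in range(w // 8):
            block = out[:, i*8:(i+1)*8, j*8:(j+1)*8, 0]          # (N, 8, 8)
            coeff = np.einsum('ux,nxy,vy->nuv', D, block, D)     # forward DCT per block
            coeff *= mask                                        # keep 9 zig-zag coefficients
            out[:, i*8:(i+1)*8, j*8:(j+1)*8, 0] = np.einsum(
                'ux,nuv,vy->nxy', D, coeff, D)                   # inverse DCT (D is orthonormal)
    return out

# usage: pad 28x28 MNIST to 32x32 so 8x8 blocks tile it, then feed plain arrays to fit()
x_train_pad = np.pad(x_train, ((0, 0), (0, 4), (0, 4), (0, 0)), mode='constant')
x_train_jpeg = jpeg_preprocess(x_train_pad)

Since the transform now happens on concrete arrays before training, the JPEGLayer (and the K.eval call inside it) can simply be dropped from the graph.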