admin管理员组

文章数量:1313731

I am using tensorflow to fit some version of a financial model. I have subclassed the keras.Model class to write some custom code. Everything seems to work well when I run the model in eager execution. To speed things up I wanted to use the functionality provided by the tf.function decorator. I decorated two functions, the model call itself and the custom train_step. If I only decorate the train_step, everything works fine. As soon as I add the call of the model, I get the following error message:

raise ValueError("No gradients provided for any variable.")

Has anyone seen something similar or has an idea what is going wrong here?

Thank you very much for the help.

The model file:

import tensorflow as tf
import keras

class bondPriceModel(keras.Model):
    def __init__(self, econDict, setupDict, GHDict, **kwargs):
        super().__init__(**kwargs)
        self.econDict = econDict
        self.setupDict = setupDict
        self.GHDict = GHDict
        # Layers
        self.hidden1 = keras.layers.Dense(setupDict['layerNodes'][0], activation='relu', kernel_initializer='he_normal', name='hidden1')
        self.hidden2 = keras.layers.Dense(setupDict['layerNodes'][1], activation='relu', kernel_initializer='he_normal', name='hidden2')
        self.outputs = keras.layers.Dense(1, activation='sigmoid', kernel_initializer='he_normal', bias_initializer='ones',
                                     name='outputBP')
        # Set none values for variables

    @tf.function
    def call(self, inputs):
        x = self.hidden1(inputs)
        x = self.hidden2(x)
        x = self.outputs(x)
        return x

    def compile(self, optimizer, loss_fn):
        super()pile(optimizer=optimizer, loss=loss_fn)

    @tf.function
    def train_step(self, x_batch_train, y_batch_train):
        with tf.GradientTape() as tape:
            y_pred = self(x_batch_train, training=True)
            loss_value = self.loss(y_batch_train, y_pred)
        grads = tape.gradient(loss_value, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        return loss_value

The script:

'''
This file contains the runtime code with all steps necessary to  solve the underlying model.
So far implemented:
- Rep-Agent utility/consumption ratio solver using a fixed-point iteration
- Next: Implement risk-free bond pricing function.
'''
import random
import os
os.environ["OMP_NUM_THREADS"] = "4"
import numpy as np
import tensorflow as tf
from main.parameters import econDict
import keras
from helpers.bondPriceModel import bondPriceModel



# Specify Neural Network parameters
print('##### Setup Model #####')
setupDict = {}
setupDict['learningRate'] = 1e-5
setupDict['epochs'] = 150
setupDict['batchSize'] = 128
setupDict['nrOfBatches'] = 80
setupDict['inputShape'] = 6
setupDict['outputShape'] = 4
setupDict['layerNodes'] = [32 * 20, 32 * 20]
setupDict['seed'] = 1
setupDict['simLength'] = setupDict['nrOfBatches'] * setupDict['batchSize']

# Set up model for Bond Price
bondPriceNN = bondPriceModel(econDict, setupDict, [])
optimizerBP = keras.optimizers.Adam(learning_rate=setupDict['learningRate'])
loss_fnBP = keras.losses.MeanSquaredError(reduction="sum_over_batch_size", name="mean_squared_error")
bondPriceNNpile(optimizerBP, loss_fnBP)
# Initialize the model states
random.seed(setupDict['seed'])
tf.random.set_seed(100)
# Fit first pass of the model
stateInit = np.vstack((np.ones(setupDict['simLength'])*0.15 + np.random.normal(0, 0.15/4, size=setupDict['simLength']),
                      np.ones(setupDict['simLength'])*0.005 + np.random.normal(0, 0.005/4, size=setupDict['simLength']),
                      np.ones(setupDict['simLength'])*0.2 + np.random.normal(0, 0.2/4, size=setupDict['simLength']),
                      np.random.normal(0, 0.0055, size=setupDict['simLength']),
                      np.random.normal(0, 0.012, size=setupDict['simLength']),
                      np.random.normal(0, 0.00104, size=setupDict['simLength']))).T
bondPriceInit = np.zeros(setupDict['simLength'])
for i in range(setupDict['simLength']):
    bondPriceInit[i] = 0.97 + np.random.normal(0, 0.005)

train_datasetBP = tf.data.Dataset.from_tensor_slices((tf.convert_to_tensor(stateInit, dtype=tf.float32), bondPriceInit))
train_datasetBP = train_datasetBP.shuffle(buffer_size=256).batch(setupDict['batchSize'])

for epoch in range(setupDict['epochs']):
    for step, (x_batch_train, y_batch_train) in enumerate(train_datasetBP):
        loss_valueBP = bondPriceNN.train_step(x_batch_train, y_batch_train)
        # Log every 2 batches.
        if step % 2 == 0:
            print(
                f"Training loss (for one batch) at step {step}: {float(loss_valueBP)}"
            )

本文标签: pythontffunction decorator on custom model call ErrorStack Overflow