Create a Python-based custom gradient function for an operation (without implementing it in C++)?

I am trying to write my own gradient function for 'my_op', which in this example contains only a call to tf.identity() (ideally, it could be any graph).

    import tensorflow as tf
    from tensorflow.python.framework import function

    def my_op_grad(x):
        return [tf.sigmoid(x)]

    @function.Defun(a=tf.float32, python_grad_func=my_op_grad)
    def my_op(a):
        return tf.identity(a)

    a = tf.Variable(tf.constant([5., 4., 3., 2., 1.], dtype=tf.float32))

    sess = tf.Session()
    sess.run(tf.initialize_all_variables())

    grad = tf.gradients(my_op(a), [a])[0]
    result = sess.run(grad)
    print(result)

    sess.close()

Unfortunately, I get the following error:

    Traceback (most recent call last):
      File "custom_op.py", line 19, in <module>
        grad = tf.gradients(my_op(a), [a])[0]
      File "/Users/njk/tfm/lib/python3.5/site-packages/tensorflow/python/framework/function.py", line 528, in __call__
        return call_function(self._definition, *args, **kwargs)
      File "/Users/njk/tfm/lib/python3.5/site-packages/tensorflow/python/framework/function.py", line 267, in call_function
        compute_shapes=False)
      File "/Users/njk/tfm/lib/python3.5/site-packages/tensorflow/python/framework/ops.py", line 2285, in create_op
        raise TypeError("Input #%d is not a tensor: %s" % (idx, a))
    TypeError: Input #0 is not a tensor: <tensorflow.python.ops.variables.Variable object at 0x1080d2710>

I know that it is possible to create a custom C++ operation, but in my case the function itself can easily be written in Python using standard TensorFlow operations; I only need to supply my own gradient for it, so I would like to avoid writing unnecessary C++ code.
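For reference, one related Python-only technique I am aware of is tf.RegisterGradient combined with gradient_override_map. Here is a minimal sketch, assuming the graph-mode API of this TensorFlow version; the gradient name "CustomSigmoidGrad" is just an illustrative label:

    import tensorflow as tf

    # Register a gradient function under an illustrative name; like
    # ops.RegisterGradient, it receives the forward op and the incoming
    # gradient and returns the gradients w.r.t. the op's inputs.
    @tf.RegisterGradient("CustomSigmoidGrad")
    def _custom_sigmoid_grad(op, grad):
        return tf.sigmoid(op.inputs[0])

    def my_op(x):
        # Ops created inside this scope use the overridden gradient
        # in place of Identity's registered one.
        g = tf.get_default_graph()
        with g.gradient_override_map({"Identity": "CustomSigmoidGrad"}):
            return tf.identity(x)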

In addition, I am using a recent upstream version of TensorFlow from GitHub.

2 answers

Note that python_grad_func needs to have the same interface as ops.RegisterGradient ( https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/framework/function.py#L349 ): it receives the forward op and the incoming gradient, not the raw input tensor.

Here is the modified code:

    def my_op_grad(op, grad):  # takes (op, grad) instead of my_op_grad(x)
        return tf.sigmoid(op.inputs[0])

    @function.Defun(a=tf.float32, python_grad_func=my_op_grad)
    def my_op(a):
        return tf.identity(a)

    def main(unused_argv):
        a = tf.Variable(tf.constant([-5., 4., -3., 2., 1.], dtype=tf.float32))

        sess = tf.Session()
        sess.run(tf.initialize_all_variables())

        # Workaround for bug github.com/tensorflow/tensorflow/issues/3710
        a = tf.identity(a)

        grad = tf.gradients(my_op(a), [a])[0]
        result = sess.run(grad)
        print(result)

        sess.close()

Output (elementwise tf.sigmoid of the input, confirming the custom gradient was used):

 [ 0.00669286 0.98201376 0.04742587 0.88079709 0.7310586 ] 
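As a sanity check (not part of the original answer): these values should equal tf.sigmoid evaluated elementwise at the input, since that is exactly what my_op_grad returns.

    # Hypothetical verification snippet: the gradient printed above
    # should match sigmoid applied elementwise to the input vector.
    with tf.Session() as check_sess:
        expected = check_sess.run(tf.sigmoid(tf.constant([-5., 4., -3., 2., 1.])))
        print(expected)  # ~[0.00669  0.98201  0.04743  0.88080  0.73106]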

The following also works. Is there any reason to prefer python_grad_func instead?

    @tf.function.Defun(tf.float32, tf.float32)
    def bprop(x, dy):
        return tf.sigmoid(x)

    @tf.function.Defun(tf.float32, grad_func=bprop)
    def fprop(x):
        return x  # identity

    a = tf.Variable(tf.constant([-5., 4., -3., 2., 1.], dtype=tf.float32))
    grad = tf.gradients(fprop(a), [a])

    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())
        result = sess.run(grad)
        print(result)
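For what it's worth, the two hooks take different inputs, which is visible in the two answers above (a sketch of the interfaces as used here, not authoritative documentation):

    # python_grad_func: a plain Python function with the
    # ops.RegisterGradient interface -- it receives the forward op
    # and the incoming gradient.
    def my_op_grad(op, grad):
        return tf.sigmoid(op.inputs[0])

    # grad_func: another Defun-wrapped function -- it receives the
    # forward op's input tensors followed by the incoming gradients.
    @tf.function.Defun(tf.float32, tf.float32)
    def bprop(x, dy):
        return tf.sigmoid(x)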
