Wrap a C++ struct in Python with TensorFlow

I am currently writing a custom TensorFlow op in C++ that should be wrapped in Python. Following the custom-op guide, I defined my op and its per-device functor as follows.

#ifndef BUILD_COVARIANCE_FUNCTOR_HEADER
#define BUILD_COVARIANCE_FUNCTOR_HEADER

#define ARD_PARAM_BUFFER_SIZE 256

#define EIGEN_USE_THREADS

#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/op_kernel.h"

namespace tensorflow{
    REGISTER_OP("CovMatrixGenerator")
    .Input("input_features: float32")
    .Output("output_latent: float32")
    .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) {
      c->set_output(0, c->input(0));
      return Status::OK();
    });

    using CPUDevice = Eigen::ThreadPoolDevice;
    using GPUDevice = Eigen::GpuDevice;

    template <typename D, typename T>
    class CovMatrixGenerator : public OpKernel {
    public:
        explicit CovMatrixGenerator(OpKernelConstruction* context);
        void Compute(OpKernelContext* context) override;
    };

    // Register the CPU kernels.
    #define REGISTER_CPU(T)                                          \
     REGISTER_KERNEL_BUILDER(                                       \
        Name("CovMatrixGenerator").Device(DEVICE_CPU).TypeConstraint<T>("T"), \
        CovMatrixGenerator<CPUDevice, T>);
    REGISTER_CPU(float);
    REGISTER_CPU(int32);

    template<typename T, size_t dim>
    struct ExpQuadParams {
        T ardTheta[dim];
        T theta;
        const size_t dimensionality = dim;

        EIGEN_DEVICE_FUNC
        ExpQuadParams(T thetaInit, T ardThetaInit) {
            for (size_t i = 0; i < dim; i++) {
                ardTheta[i] = ardThetaInit;
            }
            theta = thetaInit;
        }

        ExpQuadParams() : ExpQuadParams(static_cast<T>(1), static_cast<T>(1)) {
            //
        }
    };

    template<typename D, typename T>
    struct BuildCovarianceFunctor {
        void operator()(const D &d, const T *data1, const T *data2, size_t size1, 
            size_t size_2, size_t dim, ExpQuadParams &params, T *out);
    };

    template<typename T>
    struct BuildCovarianceFunctor<CPUDevice, T> {
        void operator()(const CPUDevice &deviceType, const T *data1, const T *data2, size_t size1, 
            size_t size_2, size_t dim, ExpQuadParams &params, T *out);
    };
}

#endif

My implementations live in the .cpp file and are omitted here; roughly, Compute dispatches to the device functor as in the sketch below. You will notice that I declare a struct ExpQuadParams that is passed to my BuildCovarianceFunctor functors. What I would like to do is expose this struct in Python through TensorFlow, so that I can populate its fields in Python and pass them on to the C++ implementation.
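To show how the struct is consumed, here is roughly what the omitted Compute looks like; this is only a sketch following the custom-op guide's functor pattern, with placeholder sizes and the input reused for both data arguments:

template <typename D, typename T>
void CovMatrixGenerator<D, T>::Compute(OpKernelContext* context) {
    const Tensor& input = context->input(0);
    Tensor* output = nullptr;
    OP_REQUIRES_OK(context,
                   context->allocate_output(0, input.shape(), &output));

    // For now the kernel parameters are hard-coded on the C++ side;
    // the goal is to fill this struct from Python instead.
    ExpQuadParams<T, ARD_PARAM_BUFFER_SIZE> params;

    // Dispatch to the device-specific functor (sizes here are placeholders).
    BuildCovarianceFunctor<D, T>()(
        context->eigen_device<D>(),
        input.flat<T>().data(), input.flat<T>().data(),
        static_cast<size_t>(input.dim_size(0)),
        static_cast<size_t>(input.dim_size(0)),
        static_cast<size_t>(input.dim_size(1)),
        params, output->flat<T>().data());
}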

Is this possible, and if so, how is it done with TensorFlow? Does TensorFlow provide a wrapping mechanism for this?
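For reference, the closest mechanism I have found so far is flattening the struct's fields into op attributes, which Python sets when the op is created and the kernel constructor reads back; below is a rough, untested sketch (the attribute names are made up), though it loses the single ExpQuadParams struct I would prefer to keep:

// Hypothetical registration: expose the fields as attrs instead of a struct.
REGISTER_OP("CovMatrixGenerator")
    .Attr("T: {float, int32}")
    .Attr("theta: float = 1.0")
    .Attr("ard_theta: list(float) = []")
    .Input("input_features: T")
    .Output("output_latent: T");

// Kernel constructor (in the .cpp): rebuild ExpQuadParams from the attrs.
template <typename D, typename T>
CovMatrixGenerator<D, T>::CovMatrixGenerator(OpKernelConstruction* context)
    : OpKernel(context) {
    float theta;
    std::vector<float> ardTheta;
    OP_REQUIRES_OK(context, context->GetAttr("theta", &theta));
    OP_REQUIRES_OK(context, context->GetAttr("ard_theta", &ardTheta));
    // ... copy theta / ardTheta into an ExpQuadParams member ...
}

From Python, after loading the library with tf.load_op_library, the attrs would then be passed as keyword arguments, something like module.cov_matrix_generator(x, theta=2.0, ard_theta=[...]). Is there a cleaner way that exposes the struct itself?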
