
Results 121 - 130 of 166 for mat_mul (0.17 sec)

  1. tensorflow/c/eager/c_api_experimental_test.cc

      TFE_TensorHandle* m = TestMatrixTensorHandle(ctx);
      TFE_Op* matmul = MatMulOp(ctx, m, m);
      TFE_TensorHandle* retvals[2] = {nullptr, nullptr};
      int num_retvals = 2;
      TFE_Execute(matmul, &retvals[0], &num_retvals, status);
      // TFE_Execute overwrites num_retvals with the number of outputs
      // actually produced; MatMul has exactly one.
      EXPECT_EQ(1, num_retvals);
      EXPECT_EQ(TF_OK, TF_GetCode(status)) << TF_Message(status);
      TFE_DeleteOp(matmul);
      TFE_DeleteTensorHandle(m);
    - Last Modified: Thu Aug 03 03:14:26 UTC 2023
    - 31.5K bytes
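
    The helpers in this excerpt (TestMatrixTensorHandle, MatMulOp) are local to the test file. A minimal, self-contained sketch of the same eager MatMul call through the public C API, assuming a default context and a 2x2 float input, could look like this:

      #include <cstring>
      #include "tensorflow/c/c_api.h"
      #include "tensorflow/c/eager/c_api.h"

      int main() {
        TF_Status* status = TF_NewStatus();
        TFE_ContextOptions* opts = TFE_NewContextOptions();
        TFE_Context* ctx = TFE_NewContext(opts, status);
        TFE_DeleteContextOptions(opts);

        // Build a 2x2 float tensor and wrap it in an eager handle.
        float data[] = {1.f, 2.f, 3.f, 4.f};
        int64_t dims[] = {2, 2};
        TF_Tensor* t = TF_AllocateTensor(TF_FLOAT, dims, 2, sizeof(data));
        std::memcpy(TF_TensorData(t), data, sizeof(data));
        TFE_TensorHandle* m = TFE_NewTensorHandle(t, status);

        // Equivalent of the MatMulOp(ctx, m, m) test helper: a MatMul op
        // with both inputs pointing at the same handle.
        TFE_Op* matmul = TFE_NewOp(ctx, "MatMul", status);
        TFE_OpAddInput(matmul, m, status);
        TFE_OpAddInput(matmul, m, status);

        TFE_TensorHandle* retvals[1] = {nullptr};
        int num_retvals = 1;
        TFE_Execute(matmul, &retvals[0], &num_retvals, status);

        // Cleanup.
        TFE_DeleteOp(matmul);
        TFE_DeleteTensorHandle(retvals[0]);
        TFE_DeleteTensorHandle(m);
        TF_DeleteTensor(t);
        TFE_DeleteContext(ctx);
        TF_DeleteStatus(status);
      }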
  2. tensorflow/compiler/mlir/lite/transforms/optimize_batch_matmul.cc

        Value input_rhs = bmm_op.getY();
    
        Value output_lhs =
            bmm_op.getAdjX() ? create_z_x_transpose_op(input_lhs) : input_lhs;
    
    // The rhs needs to be transposed if adj_y == false AND this matmul will
    // be legalized to tfl.fully_connected.
        Value output_rhs =
            !bmm_op.getAdjY() ? create_z_x_transpose_op(input_rhs) : input_rhs;
    
        Type output_type = bmm_op.getResult().getType();
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 9.6K bytes
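
    The comment in this excerpt hinges on tfl.fully_connected's weight layout: the filter is stored as [num_units, depth] and the op computes x · filterᵀ, so a batch matmul x · y with adj_y == false must feed yᵀ as the filter. A small stand-alone sketch of that layout argument (plain C++, not the pass itself):

      #include <cstdio>

      // fully_connected-style contraction: out[m][n] = sum_k x[m][k] * w[n][k],
      // i.e. out = x * w^T. Feeding w = y^T therefore yields out = x * y,
      // which is why the pass transposes the rhs when adj_y == false.
      int main() {
        const float x[2][2] = {{1, 2}, {3, 4}};
        const float y[2][2] = {{5, 6}, {7, 8}};
        float yt[2][2];  // y transposed: the "filter" layout.
        for (int i = 0; i < 2; ++i)
          for (int j = 0; j < 2; ++j) yt[i][j] = y[j][i];

        for (int m = 0; m < 2; ++m) {
          for (int n = 0; n < 2; ++n) {
            float acc = 0;
            for (int k = 0; k < 2; ++k) acc += x[m][k] * yt[n][k];
            std::printf("%g ", acc);  // prints x*y: 19 22 / 43 50
          }
          std::printf("\n");
        }
      }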
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

    //   ...${key2}...
    // }
    // ```
    // The above template will generate two functions by substituting `key1`
    // and `key2` with the given values.
    
    module {
    
      for main_op in ["Conv2D", "DepthwiseConv2D", "MatMul"] {
        parameters[
          {"quantized_ops": ["${main_op}", "BiasAdd"], "act_func": "internal_requantize_no_activation_fn", "output_type": "!tf_type.qint8"},
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
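
    The `${main_op}` loop in this excerpt uses the same substitution mechanism the file's header comment describes for `key1`/`key2`. A minimal, hypothetical sketch of that textual expansion (Substitute is illustrative, not the library's actual helper):

      #include <cstdio>
      #include <string>

      // Replace every occurrence of "${key}" in tmpl with value.
      std::string Substitute(std::string tmpl, const std::string& key,
                             const std::string& value) {
        const std::string token = "${" + key + "}";
        size_t pos = 0;
        while ((pos = tmpl.find(token, pos)) != std::string::npos) {
          tmpl.replace(pos, token.size(), value);
          pos += value.size();
        }
        return tmpl;
      }

      int main() {
        const std::string tmpl = "func @quantized_${main_op}_fn(...)";
        // One expanded function per listed main_op.
        for (const char* op : {"Conv2D", "DepthwiseConv2D", "MatMul"})
          std::printf("%s\n", Substitute(tmpl, "main_op", op).c_str());
      }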
  4. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.td

    def MakeOneDimValueBroadcastable : NativeCodeCall<
      "MakeOneDimValueBroadcastable($_builder, $_loc, $0, $1.getType().cast<ShapedType>())">;
    
    // Match convolution op with "NHWC" data format or matmul op.
    def SupportedAffineOpMatcher : NativeCodeCall<
      "MatchSupportedAffineOp($_self, $0, $1, $2)">;
    
    // Checks if a value can be symmetrically quantized.
    - Last Modified: Wed Feb 14 03:24:59 UTC 2024
    - 8.4K bytes
  5. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

    #include "xla/xla_data.pb.h"
    
    namespace mlir::quant {
    namespace {
    
    constexpr StringRef kTfQuantCreatedEinsum = "__tf_quant_created_einsum";
    
    // Replaces mixed-type Conv and Matmul cast hacks with TF XLA ops.
    // TODO(b/228403741): Support conversion for dynamic-shaped TF ops.
    class ReplaceCastHacksWithTFXLAOpsPass
        : public PassWrapper<ReplaceCastHacksWithTFXLAOpsPass,
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
  6. tensorflow/compiler/mlir/quantization/common/attrs_and_constraints_test.cc

          return %0 : tensor<2x2xf32>
        }
        func.func private @composite_fn_1(%arg0: tensor<2x2xf32>, %arg1: tensor<2x2xf32>) -> tensor<2x2xf32> attributes {tf_quant.composite_function} {
    - Last Modified: Tue May 14 17:10:32 UTC 2024
    - 22.9K bytes
  7. tensorflow/compiler/mlir/lite/transforms/passes.h

        bool enable_canonicalization, bool disable_fuse_mul_and_fc = false);
    std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizePass();
    
    // Creates an instance of the TensorFlow Lite batch matmul optimize pass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreateOptimizeBatchMatmulPass();
    
    // Creates an instance of the TensorFlow Lite dialect PrepareTF pass.
    std::unique_ptr<OperationPass<func::FuncOp>> CreatePrepareTFPass(
    - Last Modified: Thu Mar 07 21:29:34 UTC 2024
    - 10.9K bytes
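
    A usage sketch for the declared factory, assuming the standard MLIR pass-manager API and that the factory lives in the mlir::TFL namespace (an assumption; the excerpt does not show the header's namespace):

      #include "mlir/Dialect/Func/IR/FuncOps.h"
      #include "mlir/Pass/PassManager.h"
      #include "tensorflow/compiler/mlir/lite/transforms/passes.h"

      // Schedule the batch-matmul optimization over every func.func
      // nested in the module the pass manager runs on.
      void AddTFLiteBatchMatmulOptimization(mlir::PassManager& pm) {
        pm.addNestedPass<mlir::func::FuncOp>(
            mlir::TFL::CreateOptimizeBatchMatmulPass());
      }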
  8. tensorflow/compiler/mlir/quantization/tensorflow/passes/prepare_lifting.cc

            loc, value, Create1DConstValue(builder, loc, new_shape));
      }
      return ConstantFoldOpIfPossible(value.getDefiningOp()).front();
    }
    
    // Matches convolution op with "NHWC" data format or matmul op with false adj_y.
    // The list of supported ops in this function is:
    // - Conv2DOp
    // - Conv3DOp
    // - DepthwiseConv2dNativeOp
    // - MatMulOp
    // - BatchMatMulV2Op
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 13.3K bytes
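
    The supported-op list in this excerpt reduces to a simple predicate. A hypothetical restatement in plain C++ (the real matcher operates on MLIR ops, not strings, and the Conv3D data-format string here is an assumption):

      #include <string>

      // Accepts NHWC-format convolutions, or (batch) matmuls whose rhs is
      // not adjointed, mirroring the comment above.
      bool IsSupportedAffineOp(const std::string& op,
                               const std::string& data_format, bool adj_y) {
        if (op == "Conv2D" || op == "DepthwiseConv2dNative")
          return data_format == "NHWC";
        if (op == "Conv3D") return data_format == "NDHWC";  // assumption
        if (op == "MatMul" || op == "BatchMatMulV2") return !adj_y;
        return false;
      }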
  9. tensorflow/compiler/mlir/lite/transforms/passes.td

                 "bool", "false",
                 "Disable folding mul and fully connected ops during optimization pass.">,
      ];
    }
    
    def OptimizeBatchMatmulPass : Pass<"tfl-optimize-batch-matmul", "mlir::func::FuncOp"> {
      let summary = "Optimize FC with BatchMatmul within the TensorFlow Lite dialect";
      let constructor = "CreateOptimizeBatchMatmulPass()";
      let dependentDialects = ["TFL::TensorFlowLiteDialect"];
    }
    - Last Modified: Wed Apr 24 20:30:06 UTC 2024
    - 22.6K bytes
  10. tensorflow/c/c_api_experimental_test.cc

      TFE_ContextOptions* tfe_context_options_;
      TFE_Context* tfe_context_;
    };
    
    TEST_F(ShapeInferenceTest, InfersShapesFromInputShapes) {
      TFE_Op* matmul_op;
      matmul_op = TFE_NewOp(tfe_context_, "MatMul", status_);
      CHECK_EQ(TF_OK, TF_GetCode(status_)) << TF_Message(status_);
    
      // Infer shape when everything is known.
      CheckOutputShapes(matmul_op,
    - Last Modified: Tue Jan 17 22:27:52 UTC 2023
    - 13.1K bytes
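
    The shape the test expects follows from the MatMul rule: [m,k] × [k,n] → [m,n], with transpose_a/transpose_b swapping the respective dimensions. A stand-alone sketch of that rule (CheckOutputShapes is a test-local helper not reproduced here):

      #include <array>
      #include <cstdio>

      // Output shape of MatMul given the two input shapes and the
      // transpose attributes.
      std::array<long, 2> MatMulOutputShape(long a0, long a1, long b0, long b1,
                                            bool transpose_a, bool transpose_b) {
        const long m = transpose_a ? a1 : a0;
        const long n = transpose_b ? b0 : b1;
        return {m, n};
      }

      int main() {
        auto s = MatMulOutputShape(2, 3, 3, 4, false, false);
        std::printf("[%ld, %ld]\n", s[0], s[1]);  // [2, 4]
      }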