Results 51 - 60 of 82 for _input_shapes (0.33 sec)

  1. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf.cc

        // through entry by entry.
        ArrayRef<int64_t> input_shape = input_type.getShape();
        int input_shape_size = input_shape.size();
        Shape slice_sizes(input_shape.begin(), input_shape.end());
        int slice_dimensions = slice_sizes.size();
        slice_sizes[slice_dimensions - 2] =
            std::min((int64_t)1, input_shape[input_shape_size - 2]);
        slice_sizes[slice_dimensions - 1] =
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 11 20:00:43 UTC 2024
    - 291.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tensorflow/transforms/shape_inference.cc

      // on `xla_call_module_context_` for details.
      std::vector<xla::Shape> input_shapes;
      input_shapes.reserve(op.getArgs().size());
      for (mlir::Type type : op.getArgs().getTypes()) {
        input_shapes.push_back(xla::TypeToShape(type));
      }
    
      absl::Status status = loader->RefineDynamicShapes(input_shapes);
      if (!status.ok()) {
        // Do not return false here.
        //
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Jun 08 07:28:49 UTC 2024
    - 134.1K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.cc

            file->getBuffer(), input_arrays, input_dtypes, input_shapes,
            output_arrays, control_output_arrays, graphdef_conversion_options,
            context);
      }
      return GraphdefToMlirTranslateFunction(file->getBuffer(), input_arrays,
                                             input_dtypes, input_shapes,
                                             output_arrays, control_output_arrays,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 03 18:01:23 UTC 2024
    - 23.8K bytes
    - Viewed (0)
  4. tensorflow/compiler/mlir/quantization/tensorflow/utils/tf_to_uniform_attribute_utils.cc

      int feature_group_cnt = 1;
      ShapedType input_shape =
          mlir::dyn_cast<ShapedType>(op->getOperand(0).getType());
      if (!input_shape) {
        return op->emitError(
            "Only input with known shape is supported for Uniform Quantized "
            "opset.");
      }
    
      if (op->getParentOfType<func::FuncOp>().getName().contains("depthwise_")) {
        feature_group_cnt = input_shape.getDimSize(3);
      }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 18.7K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/ir/tf_ops_n_z.cc

      assert(input_shape.size() == stride.size());
    
      for (int i = 0, e = input_shape.size(); i < e; ++i) {
        if (ShapedType::isDynamic(input_shape[i])) continue;
    
        int64_t dim_i = input_shape[i];
        int64_t begin_i = begin[i];
        int64_t end_i = end[i];
        int64_t stride_i = stride[i];
    
        // [0]: mask for begin, [1]: mask for end
        int64_t masks[] = {begin_mask & (1 << i), end_mask & (1 << i)};
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu May 09 22:07:10 UTC 2024
    - 170.8K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantized_function_library_uniform_quantized.mlir

        %i32_min_filled = "tf.Fill" (%input_shape, %i32_min) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
        %i32_max_filled = "tf.Fill" (%input_shape, %i32_max) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
        %i32_act_max_f32_filled = "tf.Fill" (%input_shape, %i32_act_max_f32) : (tensor<*xi32>, tensor<i32>) -> tensor<*xi32>
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Aug 29 01:13:58 UTC 2023
    - 19.3K bytes
    - Viewed (0)
  7. tensorflow/compiler/jit/tests/device_compiler_test_helper.h

      JitCompilationListener* listener() const { return listener_; }
    
      // Returns a test graph that will split into two XLA clusters (due to a node
      // with _XlaCompile = false).
      GraphDef GetTestGraph(const PartialTensorShape& input_shape);
    
      // Runs the graph using specified batch size both with and without XLA JIT
      // compilation. Returns an error if the results between the two do not match.
      Status ExecuteWithBatch(const GraphDef& graph, int batch);
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Feb 09 08:24:16 UTC 2024
    - 3.6K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/lite/tf_to_tfl_flatbuffer.h

        const GraphImportConfig& specs, absl::string_view debug_info_file,
        absl::string_view input_arrays, absl::string_view input_dtypes,
        absl::string_view input_shapes, absl::string_view output_arrays,
        absl::string_view control_output_arrays, llvm::SourceMgr* source_mgr,
        mlir::MLIRContext* context);
    
    // Load Saved model (either v1 or v2) into MLIR.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 08:30:24 UTC 2024
    - 4.7K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc

                                         bool cifg) {
      SmallVector<int64_t, 2> input_shape{1, 2};
      SmallVector<int64_t, 2> weight_shape{3, 12};
      SmallVector<int64_t, 1> bias_shape{2};
      SmallVector<int64_t, 2> projection_shape{1, 2};
      SmallVector<int64_t, 1> layer_norm_scale{4};
      SmallVector<int64_t, 2> output_shape{1, 2};
      auto input_type = RankedTensorType::get(input_shape, builder->getF32Type());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
    - Viewed (0)
  10. tensorflow/compiler/mlir/lite/transforms/dilated_conv.h

        expand_op.setOperand(0, stb_op.getInput());
        // Calculate the shape for expand.
        auto input_shape =
            mlir::cast<ShapedType>(stb_op.getInput().getType()).getShape();
        SmallVector<int64_t, 4> expand_shape(input_shape.begin(),
                                             input_shape.end());
        expand_shape.insert(expand_shape.begin() + expand_axis, 1);
    
        auto expand_result_type = RankedTensorType::get(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 20K bytes
    - Viewed (0)