Results 1 - 10 of 12 for input_dtype (0.21 sec)

  1. tensorflow/compiler/mlir/lite/transforms/prepare_tf.cc

          return failure();
    
        Value input = tf_op.getInput();
        RankedTensorType input_type =
            mlir::dyn_cast<RankedTensorType>(input.getType());
        // Only rank-4 inputs are guaranteed by the tf.Conv2D operator
        // verification.
        if (!input_type || input_type.isDynamicDim(3)) {
          return failure();
        }
        // Check if the given op is based on grouped convolution.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 28 21:49:50 UTC 2024
    - 64.6K bytes
    - Viewed (0)
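
    The snippet above only proceeds when the Conv2D input is a ranked, rank-4 tensor whose channel dimension (index 3) is static. A minimal standalone sketch of that guard, assuming an MLIR build and a hand-constructed type in place of a value taken from a real tf.Conv2D op (HasStaticChannelDim is an illustrative name, not from prepare_tf.cc):

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"
        #include "llvm/Support/raw_ostream.h"

        // True when `type` is a ranked rank-4 tensor with a static channel
        // dimension, mirroring the guard before the grouped-convolution check.
        static bool HasStaticChannelDim(mlir::Type type) {
          auto ranked = mlir::dyn_cast<mlir::RankedTensorType>(type);
          return ranked && ranked.getRank() == 4 && !ranked.isDynamicDim(3);
        }

        int main() {
          mlir::MLIRContext ctx;
          auto f32 = mlir::Float32Type::get(&ctx);

          // NHWC input with a dynamic batch but a static channel dimension.
          auto ok = mlir::RankedTensorType::get(
              {mlir::ShapedType::kDynamic, 224, 224, 3}, f32);
          // NHWC input whose channel dimension is unknown.
          auto bad = mlir::RankedTensorType::get(
              {1, 224, 224, mlir::ShapedType::kDynamic}, f32);

          llvm::outs() << HasStaticChannelDim(ok) << " "
                       << HasStaticChannelDim(bad) << "\n";  // prints: 1 0
          return 0;
        }
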
  2. tensorflow/compiler/mlir/quantization/tensorflow/passes/quantize_composite_functions.cc

            break;
          default:
            return nullptr;  // Not yet supported
        }
      } else {
        return nullptr;  // Not yet supported
      }
    
      input_type = input_type.clone(new_storage_type);
      return input_type;
    }
    
    // Replaces quant.qcast op to composite quantize_i8 function.
    class ReplaceQuantizePattern
        : public mlir::OpRewritePattern<quantfork::QuantizeCastOp> {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 54.5K bytes
    - Viewed (0)
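
    The tail of this snippet keeps the tensor's shape and swaps only its element type via clone(). A minimal sketch of that shape-preserving rewrite, assuming a plain signless i8 stands in for the quantized storage type the pass actually derives:

        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"
        #include "llvm/Support/raw_ostream.h"

        int main() {
          mlir::MLIRContext ctx;

          // Original activation type: a 1x3x3x8 tensor of f32.
          mlir::ShapedType input_type = mlir::RankedTensorType::get(
              {1, 3, 3, 8}, mlir::Float32Type::get(&ctx));

          // Stand-in for the new storage type; the real pass derives a
          // quantized storage type before cloning.
          mlir::Type new_storage_type = mlir::IntegerType::get(&ctx, 8);

          // clone() keeps the shape and replaces only the element type.
          mlir::ShapedType quantized_type = input_type.clone(new_storage_type);

          llvm::outs() << quantized_type << "\n";  // tensor<1x3x3x8xi8>
          return 0;
        }
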
  3. tensorflow/compiler/mlir/lite/quantization/lite/quantize_model_test.cc

    TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type,
                               const TensorType& output_type, bool allow_float,
                               std::string& output_buffer) {
      return QuantizeModel(model, input_type, output_type, allow_float,
                           /*operator_names=*/{}, TensorType_INT8, output_buffer);
    }
    
    TfLiteStatus QuantizeModel(ModelT* model, const TensorType& input_type,
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 73.9K bytes
    - Viewed (0)
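
    These test helpers are thin overloads that forward to a full-parameter QuantizeModel, defaulting the operator list to empty and the activation type to TensorType_INT8. A generic sketch of that forwarding pattern; Model, Quantize, and QuantizeFull below are hypothetical stand-ins, not the TFLite API:

        #include <string>
        #include <vector>

        // Hypothetical stand-ins for the flatbuffer model and tensor-type
        // enum used by the real test helpers.
        struct Model {};
        enum TensorType { kFloat32, kInt8 };

        // Full-parameter entry point: every knob is explicit.
        bool QuantizeFull(Model* model, TensorType input_type,
                          TensorType output_type, bool allow_float,
                          const std::vector<std::string>& op_names,
                          TensorType activations_type,
                          std::string& output_buffer) {
          // The real helper would serialize the quantized model here.
          output_buffer = "quantized";
          return true;
        }

        // Thin overload: defaults the operator list and activation type,
        // mirroring how the helpers above forward to the full signature.
        bool Quantize(Model* model, TensorType input_type,
                      TensorType output_type, bool allow_float,
                      std::string& output_buffer) {
          return QuantizeFull(model, input_type, output_type, allow_float,
                              /*op_names=*/{}, /*activations_type=*/kInt8,
                              output_buffer);
        }

        int main() {
          Model model;
          std::string buffer;
          return Quantize(&model, kInt8, kInt8, /*allow_float=*/false, buffer)
                     ? 0
                     : 1;
        }
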
  4. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

        return in_placeholder, output_tensor
    
      def _create_simple_tf1_gather_model(
          self, input_type: dtypes.DType, use_variable_for_filter=False
      ) -> Tuple[core.Tensor, core.Tensor]:
        """Creates a basic gather model.
    
        This is intended to be used for TF1 (graph mode) tests.
    
        Args:
          input_type: type of the input index tensor for gather operation.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.cc

        auto op = cast<SpaceToBatchNDOp>(src_op);
    
        Location loc = op.getLoc();
        auto input_type = mlir::cast<TensorType>(op.getInput().getType());
        auto element_type = input_type.getElementType();
        if (!input_type.hasStaticShape()) {
          return failure();
        }
        ArrayRef<int64_t> input_shape = input_type.getShape();
        auto block_shape_type =
            mlir::cast<TensorType>(op.getBlockShape().getType());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 74.9K bytes
    - Viewed (0)
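
    This lowering gives up unless the SpaceToBatchND input has a fully static shape, then works from the concrete shape and element type. A standalone sketch of that precondition, again with a hand-built type rather than one read off an op:

        #include "llvm/ADT/ArrayRef.h"
        #include "llvm/Support/raw_ostream.h"
        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/MLIRContext.h"

        int main() {
          mlir::MLIRContext ctx;
          auto input_type = mlir::RankedTensorType::get(
              {1, 4, 4, 3}, mlir::Float32Type::get(&ctx));

          // The lowering returns failure() at this point instead of exiting.
          if (!input_type.hasStaticShape()) return 1;

          mlir::Type element_type = input_type.getElementType();
          llvm::ArrayRef<int64_t> input_shape = input_type.getShape();

          llvm::outs() << "element type " << element_type << ", rank "
                       << input_shape.size() << "\n";  // element type f32, rank 4
          return 0;
        }
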
  6. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          ('use_constant_with_int64_input', np.int64, False),
          ('use_variable_with_int64_input', np.int64, True),
      )
      @test_util.run_v2_only
      def test_gather_model(self, input_type, use_variable):
        model = self._create_gather_model(input_type, use_variable)
    
        save.save(model, self._input_saved_model_path)
    
        rng = np.random.default_rng(seed=42)
        static_input_shape = [6]
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
    - Viewed (0)
  7. tensorflow/compiler/mlir/tensorflow/transforms/sparsecore/embedding_pipelining.cc

          callers.backward->getParentOfType<func::FuncOp>();
    
      const std::vector<Value>& operands = loop_operands_nm0;
    
      // Input types will be the same as the original loop body.
      std::vector<Type> input_types = GetValueTypes(operands);
    
      // Determine the result types.
      // Return ALL outputs, respecting the provided order of the Operations. This
      // makes it straightforward for users of this function to map the return
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 92.9K bytes
    - Viewed (0)
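
    Here the new loop body's input types are derived directly from its operand values. A minimal sketch of a GetValueTypes-style helper, exercised on detached block arguments purely for illustration; the real helper in embedding_pipelining.cc may differ in signature:

        #include <vector>

        #include "llvm/ADT/ArrayRef.h"
        #include "llvm/Support/raw_ostream.h"
        #include "mlir/IR/Block.h"
        #include "mlir/IR/BuiltinTypes.h"
        #include "mlir/IR/Location.h"
        #include "mlir/IR/MLIRContext.h"
        #include "mlir/IR/Value.h"

        // Collects the type of each value in order, so a new function
        // signature can mirror the original loop body's inputs.
        std::vector<mlir::Type> GetValueTypes(llvm::ArrayRef<mlir::Value> values) {
          std::vector<mlir::Type> types;
          types.reserve(values.size());
          for (mlir::Value value : values) types.push_back(value.getType());
          return types;
        }

        int main() {
          mlir::MLIRContext ctx;
          mlir::Location loc = mlir::UnknownLoc::get(&ctx);

          // A detached block stands in for the loop body; its arguments play
          // the role of the loop operands.
          mlir::Block block;
          block.addArgument(mlir::Float32Type::get(&ctx), loc);
          block.addArgument(mlir::IntegerType::get(&ctx, 32), loc);

          std::vector<mlir::Value> operands(block.getArguments().begin(),
                                            block.getArguments().end());
          std::vector<mlir::Type> input_types = GetValueTypes(operands);

          for (mlir::Type type : input_types)
            llvm::outs() << type << "\n";  // f32, then i32
          return 0;
        }
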
  8. tensorflow/compiler/mlir/lite/flatbuffer_import.cc

        control_nodes.try_emplace(from);
        control_nodes[to].incoming.insert(from);
      }
    
      llvm::SmallVector<mlir::Type, 2> ret_types;
      llvm::SmallVector<mlir::Type, 4> input_types;
    
      auto func_loc = mlir::NameLoc::get(builder.getStringAttr(name), base_loc);
      std::vector<int> func_inputs = subgraph.inputs;
      if (is_entry_point && !ordered_input_arrays.empty()) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 18:21:50 UTC 2024
    - 66.8K bytes
    - Viewed (0)
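
    The importer names each function's location after its subgraph, wrapping a base location for the whole flatbuffer. A small sketch of that NameLoc construction, with an UnknownLoc standing in for the importer's base_loc and "main" as an illustrative subgraph name:

        #include "llvm/Support/raw_ostream.h"
        #include "mlir/IR/Builders.h"
        #include "mlir/IR/Location.h"
        #include "mlir/IR/MLIRContext.h"

        int main() {
          mlir::MLIRContext ctx;
          mlir::Builder builder(&ctx);

          // Stand-in for the base location the importer builds for the file.
          mlir::Location base_loc = mlir::UnknownLoc::get(&ctx);

          // Name the location after the subgraph, as the importer does for
          // each function it creates.
          mlir::Location func_loc =
              mlir::NameLoc::get(builder.getStringAttr("main"), base_loc);

          llvm::outs() << func_loc << "\n";  // e.g. loc("main")
          return 0;
        }
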
  9. tensorflow/compiler/jit/encapsulate_subgraphs_pass.cc

      if (inserted) {
        NodeDef arg_def;
        NodeDefBuilder builder(
            absl::StrCat(src_node->name(), "_", src_slot, "_arg"), kArgOp,
            NodeDebugInfo(src_node->def()));
        DataType dtype = edge->dst()->input_type(edge->dst_input());
        builder.Attr("T", dtype);
        builder.Attr("index", arg_index);
        Status s = builder.Finalize(&arg_def);
        if (!s.ok()) return s;
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 08:47:20 UTC 2024
    - 51K bytes
    - Viewed (0)
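
    This pass synthesizes an _Arg node whose T attribute comes from the destination node's input_type at the consuming slot. A minimal sketch of building such a NodeDef with NodeDefBuilder, assuming a TensorFlow C++ build; the node name, dtype, and index here are illustrative:

        #include "tensorflow/core/framework/node_def.pb.h"
        #include "tensorflow/core/framework/node_def_builder.h"
        #include "tensorflow/core/framework/types.pb.h"
        #include "tensorflow/core/platform/status.h"

        int main() {
          tensorflow::NodeDef arg_def;

          // Build an _Arg node carrying a float input into the encapsulated
          // function; the real pass reads the dtype off the consuming edge.
          tensorflow::NodeDefBuilder builder("input_0_arg", "_Arg");
          builder.Attr("T", tensorflow::DT_FLOAT);
          builder.Attr("index", 0);

          tensorflow::Status s = builder.Finalize(&arg_def);
          return s.ok() ? 0 : 1;
        }
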
  10. tensorflow/compiler/mlir/tfr/python/tfr_gen.py

    TFR_BUILTINS = {
        '_tfr_quant_act_range': (TFRTypes.TENSOR, TFRTypes.TENSOR),
        '_tfr_quant_rescale': TFRTypes.TENSOR,
        '_tfr_quant_raw_data': lambda input_type: input_type,
        '_tfr_quant_qparam': (TFRTypes.TENSOR, TFRTypes.TENSOR),
        '_tfr_quant_scale_factor': TFRTypes.TENSOR,
    }
    
    
    class TFRTypeResolver(type_inference.Resolver):
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 27 15:27:03 UTC 2022
    - 55.8K bytes
    - Viewed (0)