Results 41 - 50 of 74 for getElementDtype (0.28 sec)

  1. tensorflow/compiler/mlir/tensorflow/utils/xla_sharding_util.cc

                    split_dimension, num_split));
          }
    
          shape[split_dimension] = shape[split_dimension] / num_split;
          output_type =
              mlir::RankedTensorType::get(shape, input_type.getElementType());
        }
      } else {
        output_type = input_type;
      }
    
      // Creates a split op that splits |src_input| along |split_dimension|.
      llvm::SmallVector<mlir::Type, 4> output_types(num_split, output_type);
    - Last Modified: Wed May 22 21:28:13 UTC 2024
    - 34K bytes
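    Note: the hit above rebuilds the result type of a split by shrinking one dimension while keeping the element type. A minimal sketch of that pattern with plain MLIR APIs (the helper name GetSplitResultType and the lack of error handling are illustrative, not taken from xla_sharding_util.cc):

    #include <cstdint>
    #include "llvm/ADT/SmallVector.h"
    #include "mlir/IR/BuiltinTypes.h"

    // Illustrative helper: the type of one slice after splitting
    // `split_dimension` of `input_type` into `num_split` equal pieces.
    mlir::RankedTensorType GetSplitResultType(mlir::RankedTensorType input_type,
                                              int64_t split_dimension,
                                              int64_t num_split) {
      auto shape = llvm::to_vector(input_type.getShape());
      shape[split_dimension] = shape[split_dimension] / num_split;
      // Same element type, smaller extent along the split dimension.
      return mlir::RankedTensorType::get(shape, input_type.getElementType());
    }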
  2. tensorflow/compiler/mlir/tensorflow/ir/tf_executor.cc

        // the operand can be tensor of type T or T_REF.
        bool is_output_ref = mlir::isa<tf_type::TensorFlowRefType>(
            output_tensor_type.getElementType());
        if (is_output_ref && !mlir::isa<tf_type::TensorFlowRefType>(
                                 operand0_tensor_type.getElementType())) {
          return switchn.emitOpError()
                 << "expects same operand and output element type but got "
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 42.7K bytes
  3. tensorflow/compiler/mlir/quantization/tensorflow/passes/replace_cast_hacks_with_tf_xla_ops.cc

      if (!broadcasted_shapes_or.has_value()) return;
      const auto broadcasted_input_type = RankedTensorType::get(
          broadcasted_shapes_or->first, input_type.getElementType());
      const auto broadcasted_weight_type = RankedTensorType::get(
          broadcasted_shapes_or->second, weight_type.getElementType());
    
      if (broadcasted_input_type.hasStaticShape() &&
          broadcasted_weight_type.hasStaticShape()) {
        input = builder.create<TF::BroadcastToOp>(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 47.1K bytes
  4. tensorflow/compiler/mlir/lite/utils/lstm_utils_test.cc

      // input layer norm is None
      EXPECT_TRUE(mlir::isa<NoneType>(it->getOperand(20).getType()));
      // proj_bias is F32
      EXPECT_TRUE(mlir::cast<RankedTensorType>(it->getOperand(17).getType())
                      .getElementType()
                      .isF32());
    
      // output gate bias is 0 since it is out of bounds of the bias tensor, so
      // we set its value as a const tensor of specified size and value 0.
      EXPECT_TRUE(
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10K bytes
  5. tensorflow/compiler/mlir/lite/quantization/import_quant_stats_pass.cc

        if (index < 0 || index >= static_cast<int>(op->getNumResults()))
          return false;
        Value res = op->getResult(index);
        return res.getType().isa<ShapedType>() &&
               res.getType().cast<ShapedType>().getElementType().isa<FloatType>();
      }
    
      // A method to retrieve the name for the given op.
      OperationToName op_to_name_;
    
      // We split the normal names and regex names, since the former can use hash
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 9.9K bytes
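    Note: this file still uses the member-function casts (`.isa<>()` / `.cast<>()`); written with the free-function casts used by the other hits on this page, the same check might look like the sketch below (the name HasFloatResult is illustrative):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Operation.h"

    // Illustrative predicate: does result `index` of `op` exist and carry a
    // shaped type whose element type is a float type?
    bool HasFloatResult(mlir::Operation* op, int index) {
      if (index < 0 || index >= static_cast<int>(op->getNumResults()))
        return false;
      mlir::Type type = op->getResult(index).getType();
      auto shaped = mlir::dyn_cast<mlir::ShapedType>(type);
      return shaped && mlir::isa<mlir::FloatType>(shaped.getElementType());
    }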
  6. tensorflow/compiler/mlir/lite/transforms/lift_tflite_flex_ops.cc

        if (auto tensor_array_v3_op = dyn_cast<TF::TensorArrayV3Op>(tf_op)) {
          Value handle = tensor_array_v3_op.getHandle();
          auto handle_type = mlir::cast<TensorType>(handle.getType());
          if (handle_type.getElementType().isInteger(/*width=*/32)) {
            Type resource_tensor_type =
                handle_type.clone(TF::ResourceType::get(rewriter.getContext()));
            handle.setType(resource_tensor_type);
          }
        }
    
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.9K bytes
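    Note: the rewrite above keeps the handle tensor's shape and swaps in a new element type via TensorType::clone. A minimal sketch of that pattern, with an illustrative helper name:

    #include "mlir/IR/BuiltinTypes.h"

    // Illustrative: same shape (ranked or unranked) as `tensor_type`, but with
    // `new_element_type` as the element type.
    mlir::Type WithElementType(mlir::TensorType tensor_type,
                               mlir::Type new_element_type) {
      return tensor_type.clone(new_element_type);
    }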
  7. tensorflow/compiler/mlir/lite/stablehlo/transforms/unfuse_batch_norm_pass.cc

            mlir::dyn_cast<RankedTensorType>(bn_op.getVariance().getType());
        if (!input_type || !variance_type) {
          return failure();
        }
        auto fp_type = mlir::dyn_cast<FloatType>(variance_type.getElementType());
        if (!fp_type) {
          return failure();
        }
    
        // result = (x - mean) * scale / sqrt(variance + epsilon) + offset
        // Let multiplier = scale / sqrt(variance + epsilon), to compute
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.2K bytes
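    Note: the comment in this hit states the algebra the pass materializes. A scalar illustration of that rewrite (the pass itself builds this out of tensor ops, not a float helper like the one below):

    #include <cmath>

    // Scalar view of the comment above: with
    //   multiplier = scale / sqrt(variance + epsilon)
    // the normalization is a single multiply-add per element.
    float UnfusedBatchNorm(float x, float mean, float variance, float scale,
                           float offset, float epsilon) {
      const float multiplier = scale / std::sqrt(variance + epsilon);
      return (x - mean) * multiplier + offset;  // == x * multiplier + (offset - mean * multiplier)
    }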
  8. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor_test.cc

        tensor.flat<T>().setValues(values);
    
        auto value_or = ConvertTensor(tensor, &b);
        TF_ASSERT_OK(value_or.status());
        auto attr = value_or.value();
    
        EXPECT_EQ(attr.getShapedType().getElementType(), expected_ty);
    
        Tensor out;
        TF_ASSERT_OK(ConvertToTensor(attr, &out));
    
        test::ExpectTensorEqual<T>(tensor, out);
      }
    };
    
    TEST_F(ConvertTensorTest, Simple) {
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 10.4K bytes
  9. tensorflow/compiler/mlir/tensorflow/transforms/functional_control_flow_to_regions.cc

    Value ConvertConditionToBoolean(Operation* op, Value cond) {
      if (auto ranked_type = mlir::dyn_cast<RankedTensorType>(cond.getType()))
        if (ranked_type.getRank() == 0 &&
            ranked_type.getElementType().isSignlessInteger(1))
          return cond;
    
      OpBuilder builder(op);
      Value to_bool = builder.create<TF::ToBoolOp>(op->getLoc(), cond);
      CopyDeviceAndUnderscoredAttributes(op, to_bool.getDefiningOp());
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11K bytes
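    Note: ConvertConditionToBoolean passes the condition through unchanged only when it is already a rank-0 tensor of signless i1; otherwise it inserts a TF::ToBoolOp. The fast-path check in isolation might look like this sketch (IsScalarI1Tensor is an illustrative name):

    #include "mlir/IR/BuiltinTypes.h"
    #include "mlir/IR/Value.h"

    // Illustrative check: is this value already a rank-0 tensor of signless i1?
    bool IsScalarI1Tensor(mlir::Value cond) {
      auto ranked_type = mlir::dyn_cast<mlir::RankedTensorType>(cond.getType());
      return ranked_type && ranked_type.getRank() == 0 &&
             ranked_type.getElementType().isSignlessInteger(/*width=*/1);
    }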
  10. tensorflow/compiler/mlir/quantization/stablehlo/passes/insert_weight_param.cc

      LogicalResult match(Operation* op) const override {
        if (op->getNumResults() != 1) {
          return failure();
        }
        auto type = mlir::cast<TensorType>(op->getResult(0).getType());
        if (!type || !type.getElementType().isF32()) {
          return failure();
        }
        return success(
            op->hasOneUse() &&
            IsWeightQuantizableFunction(*op->getUses().begin(), type.getRank()));
      }
    
    - Last Modified: Thu May 09 05:56:10 UTC 2024
    - 10.2K bytes