Results 1 - 10 of 10 for Hinsu (0.16 sec)

  1. tensorflow/compiler/mlir/tf2xla/transforms/legalize_tf_patterns.td

    // Relu op patterns.
    //===----------------------------------------------------------------------===//
    
    // TODO(hinsu): Make these patterns to TF to TF lowering. Relu6 lowering will
    // require HLO canonicalization of min and max on a tensor to ClampOp.
    
    // TODO(hinsu): Lower quantized types after supporting them in GetScalarOfType.
    def : Pat<(TF_ReluOp AnyTensor:$input),
              (CHLO_BroadcastMaxOp
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 06 18:46:23 UTC 2024
    - 34.8K bytes
    - Viewed (0)
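
    The TODO above notes that lowering Relu6 needs min and max over a tensor to canonicalize into a single ClampOp. A minimal scalar sketch of the arithmetic being lowered (plain C++; the function names are illustrative and not part of the pattern file):

    #include <algorithm>

    // relu(x)  = max(x, 0)
    // relu6(x) = min(max(x, 0), 6), i.e. clamp(x, 0, 6)
    float Relu(float x) { return std::max(x, 0.0f); }
    float Relu6(float x) { return std::clamp(x, 0.0f, 6.0f); }
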
  2. tensorflow/compiler/mlir/tensorflow/transforms/prepare_tpu_computation_for_tf_export.cc

        if (auto sharding =
                func.getArgAttrOfType<mlir::StringAttr>(i, kShardingAttr)) {
          if (!sharding.getValue().empty()) {
            BlockArgument arg = func.getArgument(i);
            // TODO(hinsu): Instead of setting both 'sharding' and '_XlaSharding'
            // attributes, only set the 'sharding' attribute. Both attributes are
            // currently required as the XlaSharding xla op kernel doesn't use the
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 11.8K bytes
    - Viewed (0)
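
    The TODO above wants to stop duplicating the sharding string across the 'sharding' and '_XlaSharding' argument attributes. A minimal sketch of how such a duplication could be written with the MLIR function-attribute API (a standalone helper; the attribute names are taken from the comment, not from the pass's actual constants):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/IR/BuiltinAttributes.h"

    // Copies a non-empty sharding string from one argument attribute name to
    // the other for argument `i` (illustrative names only).
    void DuplicateShardingAttr(mlir::func::FuncOp func, unsigned i) {
      auto sharding = func.getArgAttrOfType<mlir::StringAttr>(i, "sharding");
      if (!sharding || sharding.getValue().empty()) return;
      func.setArgAttr(i, "_XlaSharding", sharding);
    }
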
  3. tensorflow/compiler/mlir/tensorflow/transforms/lower_tf.td

    //
    // Computation of the reduction axis for the Sum op depends on whether the
    // input is a scalar or not. Restrict pattern to ranked inputs so that input to
    // the Sum op is also ranked.
    
    // TODO(hinsu): Support scalar inputs by introducing reshape to 1D.
    def NonScalarType : Type<Neg<HasAnyRankOfPred<[0]>>, "Non scalar type">;
    
    def LowerSoftmaxCrossEntropyWithLogitsOp : Pattern<
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jun 04 13:30:42 UTC 2024
    - 24.7K bytes
    - Viewed (0)
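
    The NonScalarType constraint above keeps scalar (rank-0) inputs out of the pattern until a reshape-to-1D path exists. A rough C++ analogue of that predicate (a sketch of the check, not the code TableGen generates):

    #include "mlir/IR/BuiltinTypes.h"

    // True unless `type` is a tensor statically known to be rank 0, mirroring
    // the Neg<HasAnyRankOfPred<[0]>> predicate.
    bool IsNonScalarType(mlir::Type type) {
      auto ranked = mlir::dyn_cast<mlir::RankedTensorType>(type);
      return !(ranked && ranked.getRank() == 0);
    }
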
  4. tensorflow/compiler/mlir/tf2xla/transforms/legalization_op_config.cc

      return *ops;
    }
    
    bool IsOpTypeAllowedTf2XlaFallback(const TypeID& type_id) {
      // Allowlisted TensorFlow ops are known to have well behaved tf2xla kernels
      // building valid MLIR using MlirHloBuilder.
      // TODO(hinsu): Drop explicit allowlist when MLIR based bridge is enabled for
      // all tf2xla kernels.
      // Use a pointer for the static set, so the set is not destructed upon thread
      // end, which would not be thread safe.
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Apr 24 04:08:35 UTC 2024
    - 21.7K bytes
    - Viewed (0)
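
    The comment above explains why the allowlist lives behind a pointer to a static set: a leaked heap allocation is never destructed, so lookups from other threads at shutdown stay safe. A minimal sketch of that idiom (illustrative, empty set contents; the real list enumerates the allowlisted TF op TypeIDs):

    #include "llvm/ADT/DenseSet.h"
    #include "mlir/Support/TypeID.h"

    // Heap-allocate the set once and intentionally never free it, so no
    // destructor can race with readers on other threads.
    bool IsOpAllowedForFallback(const mlir::TypeID& type_id) {
      static const llvm::DenseSet<mlir::TypeID>* ops =
          new llvm::DenseSet<mlir::TypeID>{
              // mlir::TypeID::get<SomeTfOp>() entries go here.
          };
      return ops->contains(type_id);
    }
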
  5. tensorflow/compiler/mlir/lite/converter_gen.cc

        os << "  return b.Finish();\n}\n";
      }
    }
    
    // For each TFLite op, emits a builder function that packs the TFLite op into
    // the corresponding FlatBuffer object.
    //
    // TODO(hinsu): Revisit if only builtin_options and mutating_variable_inputs
    // arguments that depend on op definitions should be auto-generated and then
    // operator should be built by the caller because it does not require
    // auto-generation.
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Dec 19 15:05:28 UTC 2023
    - 23.7K bytes
    - Viewed (0)
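
    converter_gen.cc is a TableGen backend that prints C++ builder functions, one per TFLite op, to an output stream. A toy sketch of that emit-as-text style (hypothetical emitter and names; the real backend derives the name and body from the op's TableGen record):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    // Writes the skeleton of one generated builder function to `os`.
    void EmitBuilderSkeleton(llvm::raw_ostream& os, llvm::StringRef op_name) {
      os << "static auto Build" << op_name << "Operator(/* op, builder b */) {\n";
      os << "  // Pack builtin_options from the op's attributes here.\n";
      os << "  return b.Finish();\n";
      os << "}\n";
    }
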
  6. tensorflow/compiler/mlir/tf2xla/api/v1/compile_mlir_util.cc

      pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
      // The SCCP pass performs constant propagation across the IR, which, for
      // example, propagates constant arguments into callee functions.
      // TODO(hinsu): Investigate if we really need SCCP pass before shape inference
      // and can do with just one pass after the shape inference.
      pm.addPass(mlir::createSCCPPass());
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue May 21 17:24:39 UTC 2024
    - 45.3K bytes
    - Viewed (0)
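
    The pipeline excerpt runs a per-function canonicalizer and then SCCP; the TODO asks whether a single SCCP run after shape inference would suffice. A minimal sketch of assembling that fragment with the MLIR PassManager (standalone, not the bridge's full pipeline):

    #include "mlir/Dialect/Func/IR/FuncOps.h"
    #include "mlir/Pass/PassManager.h"
    #include "mlir/Transforms/Passes.h"

    // Canonicalize each function, then propagate constants across the module
    // (including into callees) with sparse conditional constant propagation.
    void AddCanonicalizeAndSccp(mlir::PassManager& pm) {
      pm.addNestedPass<mlir::func::FuncOp>(mlir::createCanonicalizerPass());
      pm.addPass(mlir::createSCCPPass());
    }
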
  7. tensorflow/compiler/mlir/tensorflow/ir/tf_executor_ops.td

    // Token type.
    def TfeTokenType : Type<CPred<"$_self.isa<TokenType>()">, "token">,
                       BuildableType<"$_builder.getType<TokenType>()">;
    
    // TODO(hinsu): Define and use TensorType instead of AnyType for data operands
    // and results. For example, MergeOp output type.
    
    //===----------------------------------------------------------------------===//
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 23 19:35:12 UTC 2023
    - 22K bytes
    - Viewed (0)
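
    The ODS definition above gives the token type a match predicate (CPred) and a builder expression (BuildableType). A small sketch of what those two pieces correspond to on the C++ side (assuming the tf_executor dialect headers; the helper names are illustrative):

    #include "mlir/IR/Builders.h"
    #include "tensorflow/compiler/mlir/tensorflow/ir/tf_executor.h"

    // The CPred side: does this type match the token type?
    bool IsTokenType(mlir::Type type) {
      return mlir::isa<mlir::tf_executor::TokenType>(type);
    }

    // The BuildableType side: construct the token type from a builder.
    mlir::Type BuildTokenType(mlir::OpBuilder& builder) {
      return builder.getType<mlir::tf_executor::TokenType>();
    }
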
  8. tensorflow/compiler/mlir/tensorflow/utils/convert_tensor.cc

        case DT_FLOAT8_E4M3FN:
          return ConvertTensorOfCustomFloatType(input_tensor, type);
        case DT_STRING:
          return ConvertStringTensor(input_tensor, type);
        default:
          // TODO(hinsu): Remove mangling now that there is a special attribute.
          return ElementsAttr(
              mlir::TF::TensorProtoAttr::get(type, MangleTensor(input_tensor)));
      }
    
    #undef CONVERT_FLAT
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Apr 26 09:37:10 UTC 2024
    - 20.5K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/bridge/convert_tf_quant_ops_to_mhlo.cc

          attr.setValue(ConvertToDenseElementsAttr(
              mlir::cast<ArrayAttr>(attr.getValue()), rewriter));
          converted_attrs.push_back(attr);
        }
      }
      return converted_attrs;
    }
    
    // TODO(hinsu): Move this pattern to legalize_tf after resolving the dependency
    // on the tensor proto.
    class ConvertUniformQuantizedDotHybridOp
        : public OpConversionPattern<TF::UniformQuantizedDotHybridOp> {
     public:
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 17:58:54 UTC 2024
    - 30.9K bytes
    - Viewed (0)
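
    The class above is an MLIR OpConversionPattern for TF::UniformQuantizedDotHybridOp. A bare skeleton of that pattern interface (a generic template without the real rewrite logic):

    #include "mlir/Transforms/DialectConversion.h"

    // A conversion pattern receives the matched op, an adaptor exposing the
    // already-converted operands, and the conversion rewriter.
    template <typename OpTy>
    class ExampleConversion : public mlir::OpConversionPattern<OpTy> {
     public:
      using mlir::OpConversionPattern<OpTy>::OpConversionPattern;

      mlir::LogicalResult matchAndRewrite(
          OpTy op, typename OpTy::Adaptor adaptor,
          mlir::ConversionPatternRewriter& rewriter) const override {
        return rewriter.notifyMatchFailure(op, "rewrite logic goes here");
      }
    };
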
  10. tensorflow/compiler/mlir/tensorflow/ir/tf_executor.cc

          mlir::isa<tf_type::TensorFlowRefType>(output_tensor_ty.getElementType());
      for (Type operand_type : merge.getOperandTypes()) {
        if (mlir::isa<ControlType>(operand_type)) break;
    
        // TODO(hinsu): Update ControlOperandsAfterAllData trait to verify this
        // constraint.
        TensorType operand_tensor_ty = mlir::dyn_cast<TensorType>(operand_type);
        if (!operand_tensor_ty)
          return merge.emitOpError()
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 42.7K bytes
    - Viewed (0)