Results 1 - 6 of 6 for i8_1 (0.09 sec)

  1. tensorflow/compiler/jit/deadness_analysis_test.cc

      Output id_1 = ops::Identity(root.WithOpName("id_1"), outputs[1]);
    
      FixupSourceAndSinkEdges(root.graph());
    
      PredicateMapTy predicate_map;
      TF_ASSERT_OK(ComputePredicates(*root.graph(), &predicate_map));
    
      EXPECT_EQ(predicate_map[ControlOutputFor(id_0)], "#false");
      EXPECT_EQ(predicate_map[ControlOutputFor(id_1)], "#true");
    }
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Thu Feb 22 06:59:07 UTC 2024
    - 51.6K bytes
  2. src/internal/types/testdata/spec/assignability.go

    	i8 Int8,
    	i16 Int16,
    	i32 Int32,
    	i64 Int64,
    	i8_16 Int8_16,
    ) {
    	b = 42
    	b = 42.0
    	// etc.
    
    	i8 = -1 << 7
    	i8 = 1<<7 - 1
    	i16 = -1 << 15
    	i16 = 1<<15 - 1
    	i32 = -1 << 31
    	i32 = 1<<31 - 1
    	i64 = -1 << 63
    	i64 = 1<<63 - 1
    
    	i8_16 = -1 << 7
    	i8_16 = 1<<7 - 1
    	i8_16 = - /* ERRORx `cannot use .* as Int8_16` */ 1 << 15
    	i8_16 = 1 /* ERRORx `cannot use .* as Int8_16` */ <<15 - 1
    }
    
    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Thu Nov 09 17:24:42 UTC 2023
    - 5.1K bytes
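The shift expressions in the assignability.go excerpt above spell out the exact range of each signed type: -1 << (n-1) is the smallest and 1<<(n-1) - 1 the largest value an n-bit two's-complement integer can hold, which is why the Int8_16 lines shifted by 15 are flagged as errors. A minimal standalone Go sketch (not part of the test file, using only the standard math constants) to confirm the arithmetic:

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        // -1 << 7 and 1<<7 - 1 are the int8 bounds used in assignability.go.
        var i8min int8 = -1 << 7
        var i8max int8 = 1<<7 - 1
        fmt.Println(i8min == math.MinInt8, i8max == math.MaxInt8) // true true

        // The same pattern with 15 gives the int16 bounds.
        var i16min int16 = -1 << 15
        var i16max int16 = 1<<15 - 1
        fmt.Println(i16min == math.MinInt16, i16max == math.MaxInt16) // true true
    }

Note that 1<<7 - 1 evaluates to 127 because shift binds tighter than subtraction in Go.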
  3. tensorflow/compiler/mlir/lite/quantization/device_target.cc

      i32_max_ = QuantizedType::getDefaultMaximumForInteger(kSigned, k32Bits);
      any_ = AnyQuantizedType();
      qi8_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_, i8_max_);
      qi8n_ = AnyQuantizedType::get(kSigned, i8_, f32_, i8_min_ + 1, i8_max_);
      qi32_ = AnyQuantizedType::get(kSigned, i32_, f32_, i32_min_, i32_max_);
      assert(qi8n_ == qi8n_);
    }
    
    std::optional<KernelSpec> DeviceTarget::GetKernelSpec(

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.3K bytes
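In the device_target.cc snippet above, qi8_ spans the full signed 8-bit range while qi8n_ starts at i8_min_ + 1, i.e. the "narrow" range [-127, 127] that symmetric quantization schemes often use. A small Go sketch of just the range arithmetic (defaultRange below is an illustrative stand-in, not the MLIR QuantizedType API):

    package main

    import "fmt"

    // defaultRange returns the minimum and maximum of a signed width-bit
    // integer; it is only a stand-in to make the ranges concrete, not a
    // binding of QuantizedType::getDefaultMinimum/MaximumForInteger.
    func defaultRange(width uint) (lo, hi int64) {
        return -1 << (width - 1), 1<<(width-1) - 1
    }

    func main() {
        i8min, i8max := defaultRange(8)
        fmt.Println(i8min, i8max)   // -128 127  (full range, like qi8_)
        fmt.Println(i8min+1, i8max) // -127 127  (narrow range, like qi8n_)

        i32min, i32max := defaultRange(32)
        fmt.Println(i32min, i32max) // -2147483648 2147483647 (like qi32_)
    }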
  4. tensorflow/compiler/mlir/tfr/tests/raise_to_tf.mlir

      !tfr.attr {tfr.name="K",tfr.type="dtype"}) -> !tfr.tensor<K> attributes {T, K}
    tfr.func @tf__positive_(!tfr.tensor<T>) -> !tfr.tensor<i1_> attributes {T, i1_}
    tfr.func @tf__invalid_type_op_(!tfr.tensor<T>) -> !tfr.tensor<i8_> attributes {T, i8_}
    
    // CHECK-LABEL: decompose_tf_same
    func.func @decompose_tf_same(%arg0: tensor<1x2x3x4x!tf_type.string>) -> tensor<1x2x3x4x!tf_type.string> {
      %0 = "tfr.cast"(%arg0) : (tensor<1x2x3x4x!tf_type.string>) -> !tfr.tensor

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 6.7K bytes
  5. tensorflow/compiler/mlir/lite/quantization/device_target.h

          QuantizedMultipliers* output_multipliers, QuantizedRanges* output_ranges);
    
      // A set of parameters are required to build the signatures.
      FloatType f32_;
      IntegerType i8_, i32_;
      int64_t i8_min_, i8_max_, i32_min_, i32_max_;
      quant::AnyQuantizedType any_, qi8_, qi8n_, qi32_;
    
     private:
      // Maps the kernel names to all the available kernels.
      llvm::StringMap<KernelSpecs> specs_;

    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Mar 08 10:41:08 UTC 2024
    - 7.1K bytes
  6. src/cmd/compile/internal/ssa/rewritegeneric.go

    	for {
    		i := v_0
    		if i.Op != OpRsh16x64 {
    			break
    		}
    		_ = i.Args[1]
    		x := i.Args[0]
    		i_1 := i.Args[1]
    		if i_1.Op != OpConst64 {
    			break
    		}
    		c := auxIntToInt64(i_1.AuxInt)
    		if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c || !(c >= 0 && c < 16 && i.Uses == 1) {
    			break
    		}
    		v.reset(OpAnd16)

    Registered: Wed Jun 12 16:32:35 UTC 2024
    - Last Modified: Mon Apr 22 18:24:47 UTC 2024
    - 812.2K bytes
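The rewritegeneric.go excerpt above is part of a generated rule that matches an Rsh16x64 by a constant c, requires the outer operand v_1 to be the same constant (with 0 <= c < 16 and a single use of the inner shift), and rewrites the result into an And16. The excerpt truncates before showing the outer op, but the usual shape of such a rule (a right shift feeding a left shift by the same constant) rests on the identity that, for signed 16-bit x, (x >> c) << c == x & (-1 << c): shifting right then left by c just clears the low c bits. A standalone Go check of that identity over all int16 values (independent of the compiler code above):

    package main

    import "fmt"

    func main() {
        // Verify (x >> c) << c == x & (-1 << c) for every int16 x and 0 <= c < 16.
        ok := true
        for x := -32768; x <= 32767; x++ {
            for c := uint(0); c < 16; c++ {
                v := int16(x)
                if (v>>c)<<c != v&(-1<<c) {
                    ok = false
                }
            }
        }
        fmt.Println("identity holds for all int16 values:", ok) // true
    }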