Results 1 - 10 of 43 for Dadd (0.03 sec)

  1. tensorflow/compiler/jit/xla_cluster_util_test.cc

      Output enter_0 =
          ops::internal::Enter(root.WithOpName("enter_0"), a, "frame_0");
      Output exit_0 = ops::internal::Exit(root.WithOpName("exit_0"), enter_0);
    
      Output add = ops::Add(root.WithOpName("add"), exit_0, exit_0);
    
      Output enter_1 =
          ops::internal::Enter(root.WithOpName("enter_1"), add, "frame_0");
      Output exit_1 = ops::internal::Exit(root.WithOpName("exit_1"), enter_1);
    
      FixupSourceAndSinkEdges(root.graph());
    
      GraphCycles cycles;
    - Last Modified: Wed Feb 21 09:53:30 UTC 2024
    - 10.8K bytes
  2. tensorflow/c/while_loop_test.cc

      params_->cond_output = {less_than, 0};
    
      TF_Operation* one = ScalarConst(1, params_->body_graph, s_);
      TF_Operation* add =
          Add(params_->body_inputs[0], {one, 0}, params_->body_graph, s_);
      ASSERT_EQ(TF_OK, TF_GetCode(s_)) << TF_Message(s_);
      params_->body_outputs[0] = {add, 0};
    
      ExpectOK();
    
      // Create backprop graph
      TF_Output grad_output;
    - Last Modified: Thu Apr 11 06:05:56 UTC 2024
    - 15.3K bytes
  3. tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc

                                     Status run_status) {
        // Add FallbackEnabled pass that modifies the graph.
        auto optimization_pass =
            std::make_unique<NiceMock<ModifyMlirModulePass>>(run_status);
        ON_CALL(*optimization_pass, GetPassState(_, _, _, _))
            .WillByDefault(Return(pass_state));
        MlirOptimizationPassRegistry::Global().Add(10,
                                                   std::move(optimization_pass));
    - Last Modified: Tue Feb 27 08:25:30 UTC 2024
    - 16.1K bytes
  4. tensorflow/compiler/jit/encapsulate_xla_computations_pass_test.cc

      auto read_w = ops::ReadVariableOp(scope.WithOpName("ReadW"), arg6, DT_FLOAT);
      add_attrs(read_w.node());
    
      auto e = ops::Add(scope.WithOpName("E"), arg0, arg2);
      add_attrs(e.node());
      auto f = ops::Add(scope.WithOpName("F"), read_v, read_w);
      add_attrs(f.node());
      auto g = ops::Add(scope.WithOpName("G"), f, arg3);
      add_attrs(g.node());
    
      auto out0 = ops::_Retval(scope.WithOpName("b_identity_0_retval_RetVal"),
    - Last Modified: Mon Oct 16 18:03:15 UTC 2023
    - 14.7K bytes
  5. tensorflow/compiler/mlir/tensorflow/transforms/decompose_optionals.cc

      RewritePatternSet pattern_list(&getContext());
      pattern_list.add<HandleOptionalFrom>(&getContext());
      pattern_list.add<HandleOptionalGet>(&getContext());
      pattern_list.add<HandleOptionalNone>(&getContext());
      pattern_list.add<HandleFunc>(&getContext());
      pattern_list.add<HandleCall>(&getContext());
      pattern_list.add<HandleIf>(&getContext());
      FrozenRewritePatternSet patterns(std::move(pattern_list));
    
    - Last Modified: Wed Jun 12 21:18:05 UTC 2024
    - 9.8K bytes
  6. tensorflow/compiler/mlir/lite/tf_tfl_passes.cc

        pass_manager.addNestedPass<mlir::func::FuncOp>(
            mlir::TFL::CreatePostQuantizePass(emit_quant_adaptor_ops));
      }
      pass_manager.addNestedPass<mlir::func::FuncOp>(
          mlir::TFL::CreateOptimizeOpOrderPass());
      // Add optimization pass after quantization for additional fusing
      // opportunities.
    
      if (!pass_config.unfold_batch_matmul) {
        // Enable an optimization pass that transforms FC to BatchMatmul only when
    - Last Modified: Thu Jun 06 18:45:51 UTC 2024
    - 25.5K bytes
  7. tensorflow/compiler/mlir/lite/transforms/prepare_quantize.cc

      if (quant_specs_.post_training_quantization) {
        patterns_1.add<PrepareLstmOutputScale<LSTMOp>>(ctx);
        patterns_1.add<PrepareLstmOutputScale<UnidirectionalSequenceLSTMOp>>(ctx);
      }
      if (is_qdq_conversion_ ||
          quant_specs_.qdq_conversion_mode != quant::QDQConversionMode::kQDQNone) {
        patterns_1.add<PropagateTransposedPerAxisQuantDim>(ctx);
      }
    - Last Modified: Thu Apr 25 16:01:03 UTC 2024
    - 17.6K bytes
  8. tensorflow/compiler/mlir/tensorflow/analysis/side_effect_analysis.cc

            // An unknown side effect dominates other side effects so we don't have
            // to add them and can return here.
            return;
          }
          // Add op-based side effects from regions (if any).
          for (Region& region : op->getRegions()) {
            AddRegionSideEffectsForOp(region, op);
          }
          // Add op-based side effects for the op itself.
          for (const auto& effect : effects) {
    - Last Modified: Wed May 15 09:04:13 UTC 2024
    - 41.2K bytes
  9. tensorflow/compiler/mlir/quantization/stablehlo/passes/quantization_patterns.cc

        const bool enable_per_channel_quantized_weight) {
      patterns.add<XlaCallModuleOpToCallOp<QuantizeConvolutionOpPattern>>(
          ctx, enable_per_channel_quantized_weight);
      patterns.add<XlaCallModuleOpToCallOp<QuantizeDotGeneralOpPattern>>(
          ctx, enable_per_channel_quantized_weight);
      patterns
          .add<XlaCallModuleOpToCallOp<QuantizeWeightOnlyOpPattern<ConvolutionOp>>>(
    - Last Modified: Fri May 03 06:04:36 UTC 2024
    - 41.7K bytes
  10. tensorflow/compiler/mlir/tfr/ir/tfr_ops.cc

    }
    
    void BuildListOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                  MLIRContext *context) {
      results.add<BuildConstantListAsAttr>(context);
    }
    
    void TFRQuantRawDataOp::getCanonicalizationPatterns(RewritePatternSet &results,
                                                        MLIRContext *context) {
      results.add<RemoveRawDataOp>(context);
    }
    - Last Modified: Tue Nov 21 16:55:41 UTC 2023
    - 38.2K bytes