Results 51 - 60 of 109 for TEST_F (0.27 sec)
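
Every hit on this page is a use of googletest's TEST_F macro, which runs a test body against a named fixture class so that several tests can share the same setup and member state. Below is a minimal sketch of that pattern; the Counter class and CounterTest fixture are hypothetical stand-ins for illustration, not code from the TensorFlow files listed here.

    #include "gtest/gtest.h"

    // Hypothetical class under test; stands in for the TensorFlow types in the results.
    class Counter {
     public:
      void Increment() { ++value_; }
      int value() const { return value_; }

     private:
      int value_ = 0;
    };

    // The fixture. Each TEST_F that names CounterTest gets a fresh instance:
    // SetUp() runs before the test body, and protected members are visible inside it.
    class CounterTest : public ::testing::Test {
     protected:
      void SetUp() override { counter_.Increment(); }  // shared per-test setup

      Counter counter_;
    };

    TEST_F(CounterTest, SetUpRanOnce) {
      EXPECT_EQ(counter_.value(), 1);
    }

    TEST_F(CounterTest, IncrementAddsOne) {
      counter_.Increment();
      EXPECT_EQ(counter_.value(), 2);
    }

The results below all follow this shape: a fixture owning shared state (an EagerContext, a Scope, an MLIRContext, a Device) plus one TEST_F per behaviour being checked.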

  1. tensorflow/c/experimental/saved_model/core/ops/variable_ops_test.cc

      EagerContext* context() { return ctx_.get(); }
    
     private:
      std::unique_ptr<StaticDeviceMgr> device_mgr_;
      EagerContextPtr ctx_;
    };
    
    // Sanity check for variable creation
    TEST_F(VariableOpsTest, CreateVariableSuccessful) {
      // Create a DT_Resource TensorHandle that points to a scalar DT_FLOAT tensor
      ImmediateTensorHandlePtr handle;
      TF_EXPECT_OK(internal::CreateUninitializedResourceVariable(
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri Aug 21 19:26:54 UTC 2020
    - 3.8K bytes
    - Viewed (0)
  2. tensorflow/compiler/mlir/tf2xla/api/v1/compile_tf_graph_test.cc

                &per_core_arg_shapes, client, &compilation_result);
    
        if (!compilation_status.ok()) return compilation_status;
    
        return compilation_result;
      }
    };
    
    TEST_F(CompileTFGraphTest, RecordsStreamzForMlirFallback) {
      CellReader<Histogram> compilation_time(kCompilationTimeStreamzName);
    
      MlirToHloArgs mlir_to_hlo_args = CreateTestMlirToHloArgs();
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Sat Apr 13 08:08:57 UTC 2024
    - 11.7K bytes
    - Viewed (0)
  3. tensorflow/compiler/mlir/tf2xla/api/v2/tf_dialect_to_executor_test.cc

        }
        return absl::OkStatus();
      }
    
      DialectRegistry registry_;
      MLIRContext context_;
      OwningOpRef<mlir::ModuleOp> mlir_module_;
    };
    
    TEST_F(TensorflowDialectToExecutorTest, ConvertsToExecutor) {
      CellReader<int64_t> compilation_status(kExportStreamzName);
    
      TF_ASSERT_OK(CreateMlirModule("empty_func.mlir"));
    
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Mon May 13 23:22:50 UTC 2024
    - 4.4K bytes
    - Viewed (0)
  4. tensorflow/cc/gradients/data_flow_grad_test.cc

        float max_error;
        TF_ASSERT_OK((ComputeGradientError<float, float, float>(
            scope_, xs, x_shapes, ys, y_shapes, &max_error)));
        EXPECT_LT(max_error, 1e-4);
      }
    
      Scope scope_;
    };
    
    TEST_F(DataFlowGradTest, DynamicPartitionGrad) {
      TensorShape data_shape({2, 3, 2});
      auto data = Placeholder(scope_, DT_FLOAT, Placeholder::Shape(data_shape));
      auto partitions = Const(scope_, {{2, 1, 0}, {1, 2, 0}});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Jan 02 18:49:13 UTC 2018
    - 2.6K bytes
    - Viewed (0)
  5. tensorflow/compiler/mlir/quantization/stablehlo/cc/saved_model_export_test.cc

    }
    
    // Testing ConvertMlirModuleToExportedModel requires parsing an MLIR string
    // into a ModuleOp.
    using ConvertMlirModuleToExportedModelTest =
        ::mlir::quant::QuantizationTestBase;
    
    TEST_F(ConvertMlirModuleToExportedModelTest, SimpleGraphDefSet) {
      // Define a module with a no-op main function.
      mlir::OwningOpRef<mlir::ModuleOp> module_op = ParseModuleOpString(R"mlir(
        module attributes {tf_saved_model.semantics} {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Mar 20 11:11:25 UTC 2024
    - 19.6K bytes
    - Viewed (0)
  6. tensorflow/compiler/mlir/lite/quantization/lite/quantize_weights_test.cc

      }
      return nullptr;
    }
    
    TEST_F(QuantizeWeightsTest, QuantizationSucceeds) {
      LoadBasicModel();
      flatbuffers::FlatBufferBuilder builder;
      auto status = QuantizeWeights(&builder, model_, 0);
      EXPECT_EQ(status, kTfLiteOk);
    
      const uint8_t* buffer = builder.GetBufferPointer();
      const Model* output_model = GetModel(buffer);
      ASSERT_TRUE(output_model);
    }
    
    TEST_F(QuantizeWeightsTest, QuantizationFails) {
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Jun 12 23:15:24 UTC 2024
    - 32.3K bytes
    - Viewed (0)
  7. tensorflow/cc/framework/while_gradients_test.cc

        }
      }
    
      Scope scope_;
      std::vector<Output> inputs_;
      std::vector<Output> outputs_;
      std::vector<Output> grad_outputs_;
    };
    
    TEST_F(WhileGradientsTest, Basic) {
      // Create loop: while (i < 10) i += 1
      Init(1);
      CreateLoop(
          [](const Scope& s, const std::vector<Output>& inputs, Output* output) {
            *output = ops::Less(s, inputs[0], 10);
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 27 20:32:17 UTC 2017
    - 7.7K bytes
    - Viewed (0)
  8. tensorflow/compiler/mlir/tfr/integration/tfr_decompose_ctx_test.cc

      std::vector<NodeAndType> nodes;
      for (auto& node : graph.node_def()) {
        nodes.push_back({node.op(), node.attr().at("T").type()});
      }
      return nodes;
    }
    
    TEST_F(TFRDecomposeContextTest, FLOAT_1_ins) {
      std::vector<NodeDefBuilder::NodeOut> src_list;
      src_list.emplace_back("input", 0, DT_FLOAT);
      NodeDef test_node;
      auto status = NodeDefBuilder("float_add", "MyAddN")
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Wed Sep 06 19:12:29 UTC 2023
    - 5.8K bytes
    - Viewed (0)
  9. tensorflow/compiler/mlir/mlir_graph_optimization_pass_test.cc

                  /* metric name */
                  "/tensorflow/core/mlir_function_pass_graph_conversion_count");
      std::map<MlirOptimizationPassState, std::map<bool, int64_t>>
          pass_result_expected_;
    };
    
    TEST_F(MlirGraphOptimizationPassTest, OptimizationPassFailsNoFallback) {
      Init(Status(absl::StatusCode::kAborted, "aborted"),
           {MlirOptimizationPassState::Enabled});
    
      GraphDef original_graph_def;
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Tue Feb 27 08:25:30 UTC 2024
    - 16.1K bytes
    - Viewed (0)
  10. tensorflow/compiler/jit/xla_host_send_recv_device_context_test.cc

        device_allocator_ = device_->GetAllocator(device_alloc_attr);
      }
    
     protected:
      std::unique_ptr<Device> device_;
      Allocator* host_allocator_;
      Allocator* device_allocator_;
    };
    
    TEST_F(XlaHostSendRecvDeviceContextTest, CopyDeviceTensorToCPU) {
      SetDevice("GPU");
      Tensor origin_cpu_tensor(host_allocator_, DT_FLOAT, TensorShape({2, 2}));
      test::FillValues<float>(&origin_cpu_tensor, {1.2, 2.3, 3.4, 4.5});
    Registered: Sun Jun 16 05:45:23 UTC 2024
    - Last Modified: Fri May 17 22:46:36 UTC 2024
    - 7.2K bytes
    - Viewed (0)