Results 1 - 10 of 49 for relu (0.11 sec)

  1. tensorflow/compiler/jit/mark_for_compilation_pass_test.cc

        Node* add1 =
            ops::BinaryOp("Add", unstage, b, builder.opts().WithName("add1"));
        Node* relu0 = ops::UnaryOp("Relu", add0, builder.opts().WithName("relu0"));
        ops::UnaryOp("Relu", add1, builder.opts().WithName("relu1"));
        MakeStageNode(builder, "stage", {DT_FLOAT}, {relu0});
    
        return GraphDefBuilderToGraph(builder, graph->get());
      };
    
    - Last Modified: Wed Feb 14 10:11:10 UTC 2024
    - 79.6K bytes
  2. tensorflow/compiler/mlir/quantization/stablehlo/python/integration_test/quantize_model_test.py

          # Check that the activation functions are explicitly present.
          # If present, the last op before return should be stablehlo.clamp for
          # relu6 and stablehlo.maximum for relu.
          if activation_fn is nn_ops.relu6:
            self.assertRegex(module_str, r'stablehlo.clamp.*\n.*return')
          elif activation_fn is nn_ops.relu:
            self.assertRegex(module_str, r'stablehlo.maximum.*\n.*return')
        else:
    - Last Modified: Tue May 14 06:31:57 UTC 2024
    - 51.4K bytes
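
    An aside on the check above: the test asserts against the quantized module's textual form, requiring the activation op to be the last op before the return. A minimal standalone sketch of the same regex check, run against hand-written MLIR strings (the module texts below are illustrative, not output from an actual quantizer run):

        import re

        # Illustrative module texts; real quantizer output is much longer.
        module_with_relu = (
            "%0 = stablehlo.maximum %conv, %zeros : tensor<1x3xf32>\n"
            "return %0 : tensor<1x3xf32>"
        )
        module_with_relu6 = (
            "%0 = stablehlo.clamp %min, %conv, %max : tensor<1x3xf32>\n"
            "return %0 : tensor<1x3xf32>"
        )

        # Same patterns as the test: the activation op must immediately
        # precede the return.
        assert re.search(r"stablehlo.maximum.*\n.*return", module_with_relu)
        assert re.search(r"stablehlo.clamp.*\n.*return", module_with_relu6)
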
  3. tensorflow/compiler/mlir/lite/transforms/optimize_patterns.td

         (HasRankAtMost<4> $a),
         (HasRankAtMost<4> $b)]>;
    }
    
    // We can eliminate Relu from Relu(SquaredDifference(x, y)),
    // since the result of SquaredDifference is always non-negative.
    // The TFLite interpreter doesn't support Relu on int32 for now, so the
    // test cases fail unless the following pattern optimizes the Relu away.
    def OptimizeReluSquaredDifference : Pat<
    - Last Modified: Thu May 16 20:31:41 UTC 2024
    - 66.4K bytes
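
    The rewrite in this result rests on a simple algebraic fact: (x - y)^2 is never negative, so a Relu applied to it is the identity. A quick numpy check of that fact (a sketch of the identity itself, not of the TableGen pass):

        import numpy as np

        rng = np.random.default_rng(0)
        x = rng.standard_normal((4, 8))
        y = rng.standard_normal((4, 8))

        squared_difference = (x - y) ** 2
        relu = np.maximum(squared_difference, 0.0)

        # Relu(SquaredDifference(x, y)) == SquaredDifference(x, y), so the
        # pattern may rewrite the former to the latter.
        np.testing.assert_array_equal(relu, squared_difference)
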
  4. tensorflow/compiler/mlir/quantization/tensorflow/tests/replace_cast_hacks_with_tf_xla_ops.mlir

        %10 = "tf.Cast"(%9) {Truncate = false, device = ""} : (tensor<1x3xi32>) -> tensor<1x3xf32>
        %11 = "tf.Mul"(%10, %cst) {device = ""} : (tensor<1x3xf32>, tensor<f32>) -> tensor<1x3xf32>
        %12 = "tf.Relu"(%11) {device = ""} : (tensor<1x3xf32>) -> tensor<1x3xf32>
        return %12 : tensor<1x3xf32>
      }
    // CHECK-LABEL: func @matmul_with_relu
    - Last Modified: Mon Oct 30 06:52:55 UTC 2023
    - 81K bytes
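
    The three ops at the end of this function form the usual dequantize-then-activate tail: cast the int32 accumulator to float, multiply by a scale constant, then apply Relu. A numpy sketch of the same arithmetic, with made-up accumulator values and scale:

        import numpy as np

        acc_i32 = np.array([[-120, 35, 260]], dtype=np.int32)  # stands in for %9
        scale = np.float32(0.02)                               # stands in for %cst

        dequantized = acc_i32.astype(np.float32) * scale       # tf.Cast + tf.Mul
        activated = np.maximum(dequantized, 0.0)               # tf.Relu

        print(activated)  # approximately [[0.  0.7 5.2]]
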
  5. tensorflow/compiler/mlir/lite/experimental/tac/tests/raise-target-subgraphs.mlir

      %1 = "tfl.mul"(%0, %arg2) {tac.device = "GPU", fused_activation_function = "RELU6", tac.inference_type = "FLOAT"} : (tensor<1xf32>, tensor<1xf32>) -> tensor<1xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 74.9K bytes
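
    Here fused_activation_function = "RELU6" means the kernel applies min(max(v, 0), 6) to the multiply result directly, rather than emitting a separate activation op. A numpy sketch of the fused computation, with made-up inputs:

        import numpy as np

        def relu6(v):
            # RELU6 clamps to the [0, 6] range.
            return np.minimum(np.maximum(v, 0.0), 6.0)

        lhs = np.array([-2.0, 1.5, 10.0], dtype=np.float32)
        rhs = np.array([3.0, 2.0, 1.0], dtype=np.float32)

        fused_mul = relu6(lhs * rhs)  # what the fused tfl.mul computes
        print(fused_mul)              # [0. 3. 6.]
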
  6. tensorflow/compiler/mlir/quantization/stablehlo/tests/passes/lift_quantizable_spots_as_functions.mlir

    // CHECK: return %[[MAX]] : tensor<?x28x28x16xf32>
    // CHECK: }
    
    // -----
    
    // Because the operand of shape_of is an op other than the target conv,
    // this should not match the conv-relu dynamic pattern.
    
    // CHECK-LABEL: @conv_with_relu_dynamic_shape_not_same_op_fn(
    // CHECK-SAME:                    %[[ARG_0:.*]]: tensor<?x28x28x1xf32>
    - Last Modified: Fri May 10 04:07:09 UTC 2024
    - 49.8K bytes
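
    For context on the comment above (a sketch under assumptions; the exact pattern lives in the pass): with a dynamic shape, relu is commonly expressed as maximum(conv, broadcast(0, shape_of(conv))), and the lifting pattern should fire only when shape_of consumes the very same conv result. In numpy terms:

        import numpy as np

        conv_out = np.random.randn(1, 28, 28, 16).astype("float32")  # stand-in conv result
        zeros = np.broadcast_to(np.float32(0), conv_out.shape)       # zero broadcast to shape_of(conv)
        relu_out = np.maximum(conv_out, zeros)                       # the MAX the CHECK lines capture
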
  7. tensorflow/compiler/mlir/quantization/tensorflow/python/integration_test/quantize_model_test_base.py

          dilations: Sequence[int] = (1, 1, 1, 1),
          padding: str = 'SAME',
      ):
        class DepthwiseConvModel(module.Module):
          """A simple model with a single depthwise conv2d, bias and relu."""
    
          def __init__(self):
            self.out_channel_size = filter_shape[2] * filter_shape[3]
    
        # This ensures the filters have a different value range per out channel
            self.filters = np.stack(
    - Last Modified: Thu Mar 21 08:51:46 UTC 2024
    - 51.2K bytes
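
    The np.stack in this snippet exists so that each depthwise output channel gets a distinct value range, which makes per-channel quantization scales actually differ in the test. A sketch of that idea with illustrative shapes and ranges (not the actual helper from the test base):

        import numpy as np

        rng = np.random.default_rng(0)
        out_channel_size = 4

        # One 2x2 filter slice per out channel, each drawn from a wider
        # range than the previous one, stacked on the last axis.
        filters = np.stack(
            [
                rng.uniform(low=-(i + 1), high=i + 1, size=(2, 2)).astype("float32")
                for i in range(out_channel_size)
            ],
            axis=-1,
        )

        # Per-channel min/max now differ, so per-channel scales will too.
        print(filters.min(axis=(0, 1)), filters.max(axis=(0, 1)))
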
  8. tensorflow/compiler/mlir/lite/tests/prepare-quantize-post-training.mlir

      %5 = "quantfork.stats"(%4) {layerStats = dense<[-56.2916565, 122.922478]> : tensor<2xf32>} : (tensor<1x4xf32>) -> tensor<1x4xf32>
      %6 = "tfl.svdf"(%0, %1, %2, %3, %5) {fused_activation_function = "RELU", rank = 1 : i32} : (tensor<1x3xf32>, tensor<2x3xf32>, tensor<2x1xf32>, tensor<2xf32>, tensor<1x4xf32>) -> tensor<1x2xf32>
    - Last Modified: Thu May 02 09:41:17 UTC 2024
    - 52.6K bytes
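
    The quantfork.stats op records the calibrated min/max of the tensor it wraps; the prepare pass turns such stats into quantization parameters. A sketch using the generic asymmetric int8 formula on the layerStats above (the actual pass may clamp or nudge the range and zero point differently):

        # Calibrated range from the layerStats attribute above.
        min_val, max_val = -56.2916565, 122.922478

        qmin, qmax = -128, 127  # int8 range
        scale = (max_val - min_val) / (qmax - qmin)
        zero_point = round(qmin - min_val / scale)

        print(scale, zero_point)  # roughly 0.7028 and -48
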
  9. tensorflow/compiler/jit/mark_for_compilation_pass.cc

    - Last Modified: Wed Feb 21 12:19:41 UTC 2024
    - 85.3K bytes
  10. platforms/documentation/docs/src/docs/userguide/releases/upgrading/upgrading_version_4.adoc

     * <<#rel4.8:switch_to_publishing_plugins,Maven Publish and Ivy Publish Plugins>> that now support digital signatures with the <<signing_plugin#signing_plugin,Signing Plugin>>.
     * Use native <<#rel5.0:bom_import,BOM import>> in your builds.
     * The <<worker_api.adoc#worker_api,Worker API>> for enabling units of work to run in parallel.
    - Last Modified: Thu Feb 22 03:01:48 UTC 2024
    - 60.1K bytes