Skip to content

Commit

Permalink
update tensorflow serving protos to 2.6.0
Browse files Browse the repository at this point in the history
update .bazelrc embed label to 2.6.0
  • Loading branch information
emacski committed Aug 27, 2021
1 parent 056785b commit 0f80e68
Show file tree
Hide file tree
Showing 31 changed files with 539 additions and 165 deletions.
2 changes: 1 addition & 1 deletion .bazelrc
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# upstream tensorflow/serving version
build --embed_label=2.5.1
build --embed_label=2.6.0

build --verbose_failures
# enable proper toolchain resolution for cc rules
Expand Down
3 changes: 2 additions & 1 deletion proto/tensorflow/core/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -17,13 +17,14 @@ proto_library(
srcs = [
"protobuf/bfc_memory_map.proto",
"protobuf/cluster.proto",
"protobuf/composite_tensor_variant.proto",
"protobuf/config.proto",
"protobuf/control_flow.proto",
"protobuf/data_service.proto",
"protobuf/debug.proto",
"protobuf/debug_event.proto",
"protobuf/device_filters.proto",
"protobuf/device_properties.proto",
"protobuf/extension_type_variant.proto",
"protobuf/graph_debug_info.proto",
"protobuf/meta_graph.proto",
"protobuf/named_tensor.proto",
Expand Down
12 changes: 12 additions & 0 deletions proto/tensorflow/core/framework/BUILD
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,7 @@ proto_library(
":cost_graph_proto",
":dataset_options_proto",
":device_attributes_proto",
":full_type_proto",
":function_proto",
":graph_proto",
":graph_transfer_info_proto",
Expand Down Expand Up @@ -145,6 +146,7 @@ proto_library(
strip_import_prefix = "/proto/",
deps = [
":attr_value_proto",
":full_type_proto",
":resource_handle_proto",
":tensor_proto",
":tensor_shape_proto",
Expand All @@ -159,6 +161,7 @@ proto_library(
strip_import_prefix = "/proto/",
deps = [
":attr_value_proto",
":full_type_proto",
":resource_handle_proto",
":tensor_proto",
":tensor_shape_proto",
Expand Down Expand Up @@ -215,6 +218,7 @@ proto_library(
strip_import_prefix = "/proto/",
deps = [
":attr_value_proto",
":full_type_proto",
":function_proto",
":node_def_proto",
":op_def_proto",
Expand All @@ -233,6 +237,7 @@ proto_library(
strip_import_prefix = "/proto/",
deps = [
":attr_value_proto",
":full_type_proto",
":node_def_proto",
":op_def_proto",
":resource_handle_proto",
Expand All @@ -242,6 +247,13 @@ proto_library(
],
)

proto_library(
name = "full_type_proto",
srcs = ["full_type.proto"],
go_package = "github.com/emacski/tensorflow-serving-arm-client/go/tensorflow/core/framework",
strip_import_prefix = "/proto/",
)

proto_library(
name = "device_attributes_proto",
srcs = ["device_attributes.proto"],
Expand Down
97 changes: 43 additions & 54 deletions proto/tensorflow/core/framework/dataset_options.proto
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ syntax = "proto3";

package tensorflow.data;

option go_package = "github.com/tensorflow/tensorflow/tensorflow/go/core/framework/dataset_options_go_proto";

// Represents the type of auto-sharding we enable.
enum AutoShardPolicy {
// AUTO: Attempts FILE-based sharding, falling back to DATA-based sharding.
Expand All @@ -23,6 +25,27 @@ enum AutoShardPolicy {
OFF = -1;
}

// next: 4
message AutotuneOptions {
// Whether to automatically tune performance knobs.
oneof optional_enabled {
bool enabled = 1;
}
// When autotuning is enabled (through autotune), determines the CPU budget to
// use. Values greater than the number of schedulable CPU cores are allowed
// but may result in CPU contention.
oneof optional_cpu_budget {
int32 cpu_budget = 2;
}
// When autotuning is enabled (through autotune), determines the RAM budget to
// use. Values greater than the available RAM in bytes may result in OOM. If
// 0, defaults to half of the available RAM in bytes.
oneof optional_ram_budget {
int64 ram_budget = 3;
}
}

// next: 3
message DistributeOptions {
AutoShardPolicy auto_shard_policy = 1;
// The number of devices attached to this input pipeline.
Expand All @@ -31,59 +54,25 @@ message DistributeOptions {
}
}

message MapVectorization {
// Whether to vectorize map transformations.
oneof optional_enabled {
bool enabled = 1;
}
// Whether to use ChooseFastestBranchDataset with this transformation. If
// True, the pipeline picks between the vectorized and original segment at
// runtime based on their iterations speed.
oneof optional_use_choose_fastest {
bool use_choose_fastest = 2;
}
}

// next: 18
message OptimizationOptions {
// Whether to apply default graph optimizations. If False, only graph
// optimizations that have been explicitly enabled will be applied.
oneof optional_apply_default_optimizations {
bool apply_default_optimizations = 1;
}
// Whether to automatically tune performance knobs.
oneof optional_autotune {
bool autotune = 2;
}
// When autotuning is enabled (through autotune), determines whether to also
// autotune buffer sizes for datasets with parallelism.
oneof optional_autotune_buffers {
bool autotune_buffers = 3;
}
// When autotuning is enabled (through autotune), determines the CPU budget to
// use. Values greater than the number of schedulable CPU cores are allowed
// but may result in CPU contention.
oneof optional_autotune_cpu_budget {
int32 autotune_cpu_budget = 4;
}
// When autotuning is enabled (through autotune), determines the RAM budget to
// use. Values greater than the available RAM in bytes may result in OOM. If
// 0, defaults to half of the available RAM in bytes.
oneof optional_autotune_ram_budget {
int32 autotune_ram_budget = 5;
}
reserved 2;
reserved 3;
reserved 4;
reserved 5;
// Whether to fuse filter transformations.
oneof optional_filter_fusion {
bool filter_fusion = 6;
}
// Whether to fuse filter dataset that predicts random_uniform < rate into a
// sampling dataset.
oneof optional_filter_with_random_uniform_fusion {
bool filter_with_random_uniform_fusion = 7;
}
// Whether to hoist tf.random_uniform() ops out of map transformations.
oneof optional_hoist_random_uniform {
bool hoist_random_uniform = 8;
}
// NOTE: field id 7 deleted in June 2021.
reserved 7;
// NOTE: field id 8 deleted in June 2021.
reserved 8;
// Whether to fuse map and batch transformations.
oneof optional_map_and_batch_fusion {
bool map_and_batch_fusion = 9;
Expand All @@ -100,8 +89,10 @@ message OptimizationOptions {
oneof optional_map_parallelization {
bool map_parallelization = 12;
}
// The map vectorization options associated with the dataset.
MapVectorization map_vectorization = 13;

// NOTE: field id 13 deleted in June 2021.
reserved 13;

// Whether to eliminate no-op transformations.
oneof optional_noop_elimination {
bool noop_elimination = 14;
Expand All @@ -115,21 +106,15 @@ message OptimizationOptions {
oneof optional_parallel_batch {
bool parallel_batch = 15;
}
// Whether to reorder ops that will discard data to the front of unary
// cardinality preserving transformations, e.g. dataset.map(...).take(3) will
// be optimized to dataset.take(3).map(...). For now this optimization will
// move `skip`, `shard` and `take` to the front of `map` and `prefetch`. This
// optimization is only for performance; it will not affect the output of the
// dataset.
oneof optional_reorder_data_discarding_ops {
bool reorder_data_discarding_ops = 16;
}
// Field id 16 was removed in 06/2021.
reserved 16;
// Whether to fuse shuffle and repeat transformations.
oneof optional_shuffle_and_repeat_fusion {
bool shuffle_and_repeat_fusion = 17;
}
}

// next: 3
message ThreadingOptions {
// If set, it overrides the maximum degree of intra-op parallelism.
oneof optional_max_intra_op_parallelism {
Expand All @@ -150,12 +135,16 @@ enum ExternalStatePolicy {

// Message stored with Dataset objects to control how datasets are processed and
// optimized.
//
// next: 8
message Options {
// Whether the outputs need to be produced in deterministic order.
oneof optional_deterministic {
bool deterministic = 1;
}
// The distribution strategy options associated with the dataset.
AutotuneOptions autotune_options = 7;
// The distribution strategy options associated with the dataset.
DistributeOptions distribute_options = 2;
// The optimization options associated with the dataset.
OptimizationOptions optimization_options = 3;
Expand Down
5 changes: 5 additions & 0 deletions proto/tensorflow/core/framework/device_attributes.proto
Original file line number Diff line number Diff line change
Expand Up @@ -50,4 +50,9 @@ message DeviceAttributes {

// String representation of the physical device that this device maps to.
string physical_device_desc = 7;

// A physical device ID for use in XLA DeviceAssignments, unique across
// clients in a multi-client setup. Set to -1 if unavailable, non-negative
// otherwise.
int64 xla_global_id = 8;
}
Loading

0 comments on commit 0f80e68

Please sign in to comment.