# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras backend API."""
import collections
import itertools
import json
import os
import random
import sys
import threading
import warnings
import weakref
import numpy as np
import tensorflow.compat.v2 as tf
from keras import backend_config
from keras.distribute import distribute_coordinator_utils as dc
from keras.dtensor import dtensor_api as dtensor
from keras.engine import keras_tensor
from keras.utils import control_flow_util
from keras.utils import object_identity
from keras.utils import tf_contextlib
from keras.utils import tf_inspect
from keras.utils import tf_utils
# isort: off
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import context
from tensorflow.python.eager.context import get_config
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
py_all = all
py_sum = sum
py_any = any

# INTERNAL UTILS

# The internal graph maintained by Keras and used by the symbolic Keras APIs
# while executing eagerly (such as the functional API for model-building).
# This is thread-local to allow building separate models in different threads
# concurrently, but comes at the cost of not being able to build one model
# across threads.
_GRAPH = threading.local()

# A graph which is used for constructing functions in eager mode.
_CURRENT_SCRATCH_GRAPH = threading.local()


# This is a thread local object that will hold the default internal TF session
# used by Keras. It can be set manually via `set_session(sess)`.
class SessionLocal(threading.local):
    def __init__(self):
        super().__init__()
        self.session = None


_SESSION = SessionLocal()

# A global dictionary mapping graph objects to an index of counters used
# for various layer/optimizer names in each graph.
# Allows giving unique autogenerated names to layers, in a graph-specific way.
PER_GRAPH_OBJECT_NAME_UIDS = weakref.WeakKeyDictionary()

# A global set tracking what object names have been seen so far.
# Optionally used as an avoid-list when generating names.
OBSERVED_NAMES = set()


# _DUMMY_EAGER_GRAPH.key is used as a key in _GRAPH_LEARNING_PHASES.
# We keep a separate reference to it to make sure it does not get removed from
# _GRAPH_LEARNING_PHASES.
# _DummyEagerGraph inherits from threading.local to make its `key` attribute
# thread local. This is needed to make set_learning_phase affect only the
# current thread during eager execution (see b/123096885 for more details).
class _DummyEagerGraph(threading.local):
    """_DummyEagerGraph provides a thread local `key` attribute.

    We can't use threading.local directly, i.e. without subclassing, because
    gevent monkey patches threading.local and its version does not support
    weak references.
    """

    class _WeakReferencableClass:
        """This dummy class is needed for two reasons.

        - We need something that supports weak references. Basic types like
          strings and ints don't.
        - We need something whose hash and equality are based on object
          identity, to make sure they are treated as different keys to
          _GRAPH_LEARNING_PHASES.

        An empty Python class satisfies both of these requirements.
        """

        pass

    def __init__(self):
        # Constructors for classes subclassing threading.local run once
        # per thread that accesses something in the class. Thus, each thread
        # will get a different key.
        super().__init__()
        self.key = _DummyEagerGraph._WeakReferencableClass()
        self.learning_phase_is_set = False


_DUMMY_EAGER_GRAPH = _DummyEagerGraph()
# This boolean flag can be set to True to leave variable initialization
# up to the user.
# Change its value via `manual_variable_initialization(value)`.
_MANUAL_VAR_INIT = False

# This list holds the available devices.
# It is populated when `_get_available_gpus()` is called for the first time.
# We assume our devices don't change henceforth.
_LOCAL_DEVICES = None

# The below functions are kept accessible from backend for compatibility.
epsilon = backend_config.epsilon
floatx = backend_config.floatx
image_data_format = backend_config.image_data_format
set_epsilon = backend_config.set_epsilon
set_floatx = backend_config.set_floatx
set_image_data_format = backend_config.set_image_data_format


@keras_export("keras.backend.backend")
@doc_controls.do_not_generate_docs
def backend():
    """Publicly accessible method for determining the current backend.

    Only exists for API compatibility with multi-backend Keras.

    Returns:
        The string "tensorflow".
    """
    return "tensorflow"


@keras_export("keras.backend.cast_to_floatx")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast_to_floatx(x):
    """Cast a Numpy array to the default Keras float type.

    Args:
        x: Numpy array or TensorFlow tensor.

    Returns:
        The same array (Numpy array if `x` was a Numpy array, or TensorFlow
        tensor if `x` was a tensor), cast to its new type.

    Example:

    >>> tf.keras.backend.floatx()
    'float32'
    >>> arr = np.array([1.0, 2.0], dtype='float64')
    >>> arr.dtype
    dtype('float64')
    >>> new_arr = cast_to_floatx(arr)
    >>> new_arr
    array([1., 2.], dtype=float32)
    >>> new_arr.dtype
    dtype('float32')

    """
    if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
        return tf.cast(x, dtype=floatx())
    return np.asarray(x, dtype=floatx())


@keras_export("keras.backend.get_uid")
def get_uid(prefix=""):
    """Associates a string prefix with an integer counter in a TensorFlow graph.

    Args:
        prefix: String prefix to index.

    Returns:
        Unique integer ID.

    Example:

    >>> get_uid('dense')
    1
    >>> get_uid('dense')
    2

    """
    graph = get_graph()
    if graph not in PER_GRAPH_OBJECT_NAME_UIDS:
        PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int)
    layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph]
    layer_name_uids[prefix] += 1
    return layer_name_uids[prefix]


@keras_export("keras.backend.reset_uids")
def reset_uids():
    """Resets graph identifiers."""
    PER_GRAPH_OBJECT_NAME_UIDS.clear()
    OBSERVED_NAMES.clear()
@keras_export("keras.backend.clear_session")
def clear_session():
"""Resets all state generated by Keras.
Keras manages a global state, which it uses to implement the Functional
model-building API and to uniquify autogenerated layer names.
If you are creating many models in a loop, this global state will consume
an increasing amount of memory over time, and you may want to clear it.
Calling `clear_session()` releases the global state: this helps avoid
clutter from old models and layers, especially when memory is limited.
Example 1: calling `clear_session()` when creating models in a loop
```python
for _ in range(100):
# Without `clear_session()`, each iteration of this loop will
# slightly increase the size of the global state managed by Keras
model = tf.keras.Sequential([
tf.keras.layers.Dense(10) for _ in range(10)])
for _ in range(100):
# With `clear_session()` called at the beginning,
# Keras starts with a blank state at each iteration
# and memory consumption is constant over time.
tf.keras.backend.clear_session()
model = tf.keras.Sequential([
tf.keras.layers.Dense(10) for _ in range(10)])
```
Example 2: resetting the layer name generation counter
>>> import tensorflow as tf
>>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense_10
>>> tf.keras.backend.set_learning_phase(1)
>>> print(tf.keras.backend.learning_phase())
1
>>> tf.keras.backend.clear_session()
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense
"""
global _SESSION
global _GRAPH_LEARNING_PHASES
global _GRAPH_VARIABLES
global _GRAPH_TF_OPTIMIZERS
global _GRAPH
_GRAPH.graph = None
tf.compat.v1.reset_default_graph()
reset_uids()
if _SESSION.session is not None:
_SESSION.session.close()
_SESSION.session = None
graph = get_graph()
with graph.as_default():
_DUMMY_EAGER_GRAPH.learning_phase_is_set = False
_GRAPH_LEARNING_PHASES = {}
# Create the learning phase placeholder in graph using the default
# factory
phase = _default_learning_phase()
_internal_set_learning_phase(graph, phase)
_GRAPH_VARIABLES.pop(graph, None)
_GRAPH_TF_OPTIMIZERS.pop(graph, None)
if tf.executing_eagerly():
# Clear pending nodes in eager executors, kernel caches and
# step_containers.
context.context().clear_kernel_cache()
# Inject the clear_session function to keras_deps to remove the dependency
# from TFLite to Keras.
tf.__internal__.register_clear_session_function(clear_session)
@keras_export("keras.backend.manual_variable_initialization")
@doc_controls.do_not_generate_docs
def manual_variable_initialization(value):
"""Sets the manual variable initialization flag.
This boolean flag determines whether
variables should be initialized
as they are instantiated (default), or if
the user should handle the initialization
(e.g. via `tf.compat.v1.initialize_all_variables()`).
Args:
value: Python boolean.
"""
global _MANUAL_VAR_INIT
_MANUAL_VAR_INIT = value
@keras_export("keras.backend.learning_phase")
@doc_controls.do_not_generate_docs
def learning_phase():
"""Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train)
to be passed as input to any Keras function
that uses a different behavior at train time and test time.
Returns:
Learning phase (scalar integer tensor or Python integer).
"""
graph = tf.compat.v1.get_default_graph()
if graph is getattr(_GRAPH, "graph", None):
# Don't enter an init_scope for the learning phase if eager execution
# is enabled but we're inside the Keras workspace graph.
learning_phase = symbolic_learning_phase()
else:
with tf.init_scope():
# We always check & set the learning phase inside the init_scope,
# otherwise the wrong default_graph will be used to look up the
# learning phase inside of functions & defuns.
#
# This is because functions & defuns (both in graph & in eager mode)
# will always execute non-eagerly using a function-specific default
# subgraph.
if context.executing_eagerly():
if _DUMMY_EAGER_GRAPH.key not in _GRAPH_LEARNING_PHASES:
return _default_learning_phase()
else:
return _internal_get_learning_phase(_DUMMY_EAGER_GRAPH.key)
else:
learning_phase = symbolic_learning_phase()
_mark_func_graph_as_unsaveable(graph, learning_phase)
return learning_phase
def global_learning_phase_is_set():
return _DUMMY_EAGER_GRAPH.learning_phase_is_set
def _mark_func_graph_as_unsaveable(graph, learning_phase):
"""Mark graph as unsaveable due to use of symbolic keras learning phase.
Functions that capture the symbolic learning phase cannot be exported to
SavedModel. Mark the funcgraph as unsaveable, so that an error will be
raised if it is exported.
Args:
graph: Graph or FuncGraph object.
learning_phase: Learning phase placeholder or int defined in the graph.
"""
if graph.building_function and is_placeholder(learning_phase):
graph.mark_as_unsaveable(
"The keras learning phase placeholder was used inside a function. "
"Exporting placeholders is not supported when saving out a "
"SavedModel. Please call `tf.keras.backend.set_learning_phase(0)` "
"in the function to set the learning phase to a constant value."
)
def symbolic_learning_phase():
graph = get_graph()
with graph.as_default():
if graph not in _GRAPH_LEARNING_PHASES:
phase = _default_learning_phase()
_internal_set_learning_phase(graph, phase)
return _internal_get_learning_phase(graph)
def _internal_set_learning_phase(graph, value):
global _GRAPH_LEARNING_PHASES
if isinstance(value, tf.Tensor):
# The 'value' here is a tf.Tensor with attribute 'graph'.
# There is a circular reference between key 'graph' and attribute
# 'graph'. So we need use a weakref.ref to refer to the 'value' tensor
# here. Otherwise, it would lead to memory leak.
value_ref = weakref.ref(value)
_GRAPH_LEARNING_PHASES[graph] = value_ref
else:
_GRAPH_LEARNING_PHASES[graph] = value
def _internal_get_learning_phase(graph):
phase = _GRAPH_LEARNING_PHASES.get(graph, None)
if isinstance(phase, weakref.ref):
return phase()
else:
return phase
def _default_learning_phase():
if context.executing_eagerly():
return 0
else:
with name_scope(""):
return tf.compat.v1.placeholder_with_default(
False, shape=(), name="keras_learning_phase"
)
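
# A minimal illustration of the two defaults above (a sketch, assuming eager
# execution, the TF2 default): until something sets the phase explicitly,
# `learning_phase()` falls back to the plain Python integer 0, i.e. inference
# mode. In graph mode the default is instead a `placeholder_with_default`
# tensor (defaulting to False) that v1-style feed dicts can override at
# `session.run` time.
#
#   assert tf.executing_eagerly()
#   assert tf.keras.backend.learning_phase() == 0  # default: test/inference
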
@keras_export("keras.backend.set_learning_phase")
@doc_controls.do_not_generate_docs
def set_learning_phase(value):
"""Sets the learning phase to a fixed value.
The backend learning phase affects any code that calls
`backend.learning_phase()`
In particular, all Keras built-in layers use the learning phase as the
default for the `training` arg to `Layer.__call__`.
User-written layers and models can achieve the same behavior with code that
looks like:
```python
def call(self, inputs, training=None):
if training is None:
training = backend.learning_phase()
```
Args:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
warnings.warn(
"`tf.keras.backend.set_learning_phase` is deprecated and "
"will be removed after 2020-10-11. To update it, simply "
"pass a True/False value to the `training` argument of the "
"`__call__` method of your layer or model."
)
deprecated_internal_set_learning_phase(value)
def deprecated_internal_set_learning_phase(value):
"""A deprecated internal implementation of set_learning_phase.
This method is an internal-only version of `set_learning_phase` that
does not raise a deprecation error. It is required because
saved_model needs to keep working with user code that uses the deprecated
learning phase methods until those APIs are fully removed from the public
API.
Specifically SavedModel saving needs to make sure the learning phase is 0
during tracing even if users overwrote it to a different value.
But, we don't want to raise deprecation warnings for users when savedmodel
sets learning phase just for compatibility with code that relied on
explicitly setting the learning phase for other values.
Args:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
if value not in {0, 1}:
raise ValueError("Expected learning phase to be 0 or 1.")
with tf.init_scope():
if tf.executing_eagerly():
# In an eager context, the learning phase values applies to both the
# eager context and the internal Keras graph.
_DUMMY_EAGER_GRAPH.learning_phase_is_set = True
_internal_set_learning_phase(_DUMMY_EAGER_GRAPH.key, value)
_internal_set_learning_phase(get_graph(), value)
@keras_export("keras.backend.learning_phase_scope")
@tf_contextlib.contextmanager
@doc_controls.do_not_generate_docs
def learning_phase_scope(value):
"""Provides a scope within which the learning phase is equal to `value`.
The learning phase gets restored to its original value upon exiting the
scope.
Args:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
warnings.warn(
"`tf.keras.backend.learning_phase_scope` is deprecated and "
"will be removed after 2020-10-11. To update it, simply "
"pass a True/False value to the `training` argument of the "
"`__call__` method of your layer or model.",
stacklevel=2,
)
with deprecated_internal_learning_phase_scope(value):
try:
yield
finally:
pass
@tf_contextlib.contextmanager
def deprecated_internal_learning_phase_scope(value):
"""An internal-only version of `learning_phase_scope`.
Unlike the public method, this method does not raise a deprecation warning.
This is needed because saved model saving needs to set learning phase
to maintain compatibility
with code that sets/gets the learning phase, but saved model
saving itself shouldn't raise a deprecation warning.
We can get rid of this method and its usages when the public API is
removed.
Args:
value: Learning phase value, either 0 or 1 (integers).
0 = test, 1 = train
Yields:
None.
Raises:
ValueError: if `value` is neither `0` nor `1`.
"""
global _GRAPH_LEARNING_PHASES
if value not in {0, 1}:
raise ValueError("Expected learning phase to be 0 or 1.")
with tf.init_scope():
if tf.executing_eagerly():
previous_eager_value = _internal_get_learning_phase(
_DUMMY_EAGER_GRAPH.key
)
previous_graph_value = _internal_get_learning_phase(get_graph())
learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set
try:
deprecated_internal_set_learning_phase(value)
yield
finally:
# Restore learning phase to initial value.
if not learning_phase_previously_set:
_DUMMY_EAGER_GRAPH.learning_phase_is_set = False
with tf.init_scope():
if tf.executing_eagerly():
if previous_eager_value is not None:
_internal_set_learning_phase(
_DUMMY_EAGER_GRAPH.key, previous_eager_value
)
elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]
graph = get_graph()
if previous_graph_value is not None:
_internal_set_learning_phase(graph, previous_graph_value)
elif graph in _GRAPH_LEARNING_PHASES:
del _GRAPH_LEARNING_PHASES[graph]
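
# A minimal usage sketch for the scope above (this mirrors the deprecated
# v1-style API, shown only for illustration, not as a recommendation for new
# code). Inside the scope, built-in layers default to training behavior; the
# previous phase (or the unset state) is restored on exit:
#
#   with tf.keras.backend.learning_phase_scope(1):
#       ...  # dropout/batch-norm layers behave as in training here
#   # outside the scope, the previous learning phase is restored
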
@tf_contextlib.contextmanager
def eager_learning_phase_scope(value):
    """Internal scope that sets the learning phase in eager / tf.function only.

    Args:
        value: Learning phase value, either 0 or 1 (integers).
            0 = test, 1 = train

    Yields:
        None.

    Raises:
        ValueError: if `value` is neither `0` nor `1`.
    """
    global _GRAPH_LEARNING_PHASES
    assert value in {0, 1}
    assert tf.compat.v1.executing_eagerly_outside_functions()
    global_learning_phase_was_set = global_learning_phase_is_set()
    if global_learning_phase_was_set:
        previous_value = learning_phase()
    try:
        _internal_set_learning_phase(_DUMMY_EAGER_GRAPH.key, value)
        yield
    finally:
        # Restore learning phase to initial value or unset.
        if global_learning_phase_was_set:
            _internal_set_learning_phase(
                _DUMMY_EAGER_GRAPH.key, previous_value
            )
        else:
            del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key]


def _as_graph_element(obj):
    """Convert `obj` to a graph element if possible, otherwise return `None`.

    Args:
        obj: Object to convert.

    Returns:
        The result of `obj._as_graph_element()` if that method is available;
        otherwise `None`.
    """
    conv_fn = getattr(obj, "_as_graph_element", None)
    if conv_fn and callable(conv_fn):
        return conv_fn()
    return None


def _assert_same_graph(original_item, item):
    """Fail if the two items are from different graphs.

    Args:
        original_item: Original item to check against.
        item: Item to check.

    Raises:
        ValueError: if graphs do not match.
    """
    original_graph = getattr(original_item, "graph", None)
    graph = getattr(item, "graph", None)
    if original_graph and graph and original_graph is not graph:
        raise ValueError(
            "%s must be from the same graph as %s (graphs are %s and %s)."
            % (item, original_item, graph, original_graph)
        )
def _current_graph(op_input_list, graph=None):
    """Returns the appropriate graph to use for the given inputs.

    This library method provides a consistent algorithm for choosing the graph
    in which an Operation should be constructed:

    1. If the default graph is being used to construct a function, we use the
       default graph.
    2. If the "graph" is specified explicitly, we validate that all of the
       inputs in "op_input_list" are compatible with that graph.
    3. Otherwise, we attempt to select a graph from the first Operation- or
       Tensor-valued input in "op_input_list", and validate that all other
       such inputs are in the same graph.
    4. If the graph was not specified and it could not be inferred from
       "op_input_list", we attempt to use the default graph.

    Args:
        op_input_list: A list of inputs to an operation, which may include
            `Tensor`, `Operation`, and other objects that may be converted to
            a graph element.
        graph: (Optional) The explicit graph to use.

    Raises:
        TypeError: If op_input_list is not a list or tuple, or if graph is not
            a Graph.
        ValueError: If a graph is explicitly passed and not all inputs are
            from it, or if the inputs are from multiple graphs, or we could
            not find a graph and there was no default graph.

    Returns:
        The appropriate graph to use for the given inputs.
    """
    current_default_graph = tf.compat.v1.get_default_graph()
    if current_default_graph.building_function:
        return current_default_graph

    op_input_list = tuple(op_input_list)  # Handle generators correctly
    if graph and not isinstance(graph, tf.Graph):
        raise TypeError(f"Input graph needs to be a Graph: {graph}")

    def _is_symbolic_tensor(tensor):
        if hasattr(tf, "is_symbolic_tensor"):
            return tf.is_symbolic_tensor(tensor)
        return type(tensor) == tf.Tensor

    # 1. We validate that all of the inputs are from the same graph. This is
    # either the supplied graph parameter, or the first one selected from one
    # of the graph-element-valued inputs. In the latter case, we hold onto
    # that input in original_graph_element so we can provide a more
    # informative error if a mismatch is found.
    original_graph_element = None
    for op_input in op_input_list:
        if isinstance(
            op_input, (tf.Operation, tf.__internal__.CompositeTensor)
        ) or _is_symbolic_tensor(op_input):
            graph_element = op_input
        else:
            graph_element = _as_graph_element(op_input)

        if graph_element is not None:
            if not graph:
                original_graph_element = graph_element
                graph = getattr(graph_element, "graph", None)
            elif original_graph_element is not None:
                _assert_same_graph(original_graph_element, graph_element)
            elif graph_element.graph is not graph:
                raise ValueError(
                    f"{graph_element} is not from the passed-in graph."
                )

    # 2. If all else fails, we use the default graph, which is always there.
    return graph or current_default_graph
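
# A minimal sketch of the selection rules above (assuming v1-style graph
# building; `g` and `x` are hypothetical names). A graph tensor input pins
# the choice of graph; with no usable inputs, the default graph wins:
#
#   g = tf.Graph()
#   with g.as_default():
#       x = tf.constant(1.0)  # a graph tensor whose `.graph` is `g`
#   assert _current_graph([x]) is g  # inferred from the input (rule 3)
#   assert _current_graph([]) is tf.compat.v1.get_default_graph()  # rule 4
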
def _get_session(op_input_list=()):
    """Returns the session object for the current thread."""
    global _SESSION
    default_session = tf.compat.v1.get_default_session()
    if default_session is not None:
        session = default_session
    else:
        if tf.inside_function():
            raise RuntimeError(
                "Cannot get session inside Tensorflow graph function."
            )
        # If we don't have a session, or that session does not match the
        # current graph, create and cache a new session.
        if getattr(
            _SESSION, "session", None
        ) is None or _SESSION.session.graph is not _current_graph(
            op_input_list
        ):
            # If we are creating the Session inside a tf.distribute.Strategy
            # scope, we ask the strategy for the right session options to use.
            if tf.distribute.has_strategy():
                configure_and_create_distributed_session(
                    tf.distribute.get_strategy()
                )
            else:
                _SESSION.session = tf.compat.v1.Session(
                    config=get_default_session_config()
                )
        session = _SESSION.session
    return session


@keras_export(v1=["keras.backend.get_session"])
def get_session(op_input_list=()):
    """Returns the TF session to be used by the backend.

    If a default TensorFlow session is available, we will return it.

    Else, we will return the global Keras session assuming it matches
    the current graph.

    If no global Keras session exists at this point,
    we will create a new global session.

    Note that you can manually set the global session
    via `K.set_session(sess)`.

    Args:
        op_input_list: An optional sequence of tensors or ops, which will be
            used to determine the current graph. Otherwise the default graph
            will be used.

    Returns:
        A TensorFlow session.
    """
    session = _get_session(op_input_list)
    if not _MANUAL_VAR_INIT:
        with session.graph.as_default():
            _initialize_variables(session)
    return session
# Inject the get_session function to keras_deps to remove the dependency
# from TFLite to Keras.
tf.__internal__.register_get_session_function(get_session)

# Inject the get_session function to tracking_util to avoid the backward
# dependency from TF to Keras.
tf.__internal__.tracking.register_session_provider(get_session)
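
# A minimal usage sketch for the session helpers above (a sketch, assuming a
# TF1-style graph-mode workflow; in TF2 eager mode these are rarely needed):
#
#   tf.compat.v1.disable_eager_execution()
#   sess = tf.compat.v1.keras.backend.get_session()  # created and cached
#   tf.compat.v1.keras.backend.set_session(sess)     # or install your own
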
def get_graph():
    if tf.executing_eagerly():
        global _GRAPH
        if not getattr(_GRAPH, "graph", None):
            _GRAPH.graph = tf.__internal__.FuncGraph("keras_graph")
        return _GRAPH.graph
    else:
        return tf.compat.v1.get_default_graph()


@tf_contextlib.contextmanager
def _scratch_graph(graph=None):
    """Retrieve a shared and temporary func graph.

    The eager execution path lifts a subgraph from the keras global graph into
    a scratch graph in order to create a function. DistributionStrategies, in
    turn, construct multiple functions as well as a final combined function.
    In order for that logic to work correctly, all of the functions need to be
    created on the same scratch FuncGraph.

    Args:
        graph: A graph to be used as the current scratch graph. If not set,
            a scratch graph will either be retrieved or created.

    Yields:
        The current scratch graph.
    """
    global _CURRENT_SCRATCH_GRAPH
    scratch_graph = getattr(_CURRENT_SCRATCH_GRAPH, "graph", None)
    # If scratch graph and `graph` are both configured, they must match.
    if (
        scratch_graph is not None
        and graph is not None
        and scratch_graph is not graph
    ):
        raise ValueError("Multiple scratch graphs specified.")

    if scratch_graph:
        yield scratch_graph
        return

    graph = graph or tf.__internal__.FuncGraph("keras_scratch_graph")
    try:
        _CURRENT_SCRATCH_GRAPH.graph = graph
        yield graph
    finally:
        _CURRENT_SCRATCH_GRAPH.graph = None
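
# A minimal sketch of the sharing contract described in the docstring above
# (`_scratch_graph` is internal; this is illustration only). Nested scopes
# observe the same FuncGraph rather than creating a new one:
#
#   with _scratch_graph() as g1:
#       with _scratch_graph() as g2:
#           assert g1 is g2  # the scratch graph is shared, not recreated
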
@keras_export(v1=["keras.backend.set_session"])
def set_session(session):
"""Sets the global TensorFlow session.
Args:
session: A TF Session.
"""
global _SESSION
_SESSION.session = session
def get_default_session_config():
if os.environ.get("OMP_NUM_THREADS"):
logging.warning(
"OMP_NUM_THREADS is no longer used by the default Keras config. "
"To configure the number of threads, use tf.config.threading APIs."
)
config = get_config()
config.allow_soft_placement = True
return config
def get_default_graph_uid_map():
graph = tf.compat.v1.get_default_graph()
name_uid_map = PER_GRAPH_OBJECT_NAME_UIDS.get(graph, None)
if name_uid_map is None:
name_uid_map = collections.defaultdict(int)
PER_GRAPH_OBJECT_NAME_UIDS[graph] = name_uid_map
return name_uid_map
# DEVICE MANIPULATION
class _TfDeviceCaptureOp:
    """Class for capturing the TF device scope."""

    def __init__(self):
        self.device = None

    def _set_device(self, device):
        """This method captures TF's explicit device scope setting."""
        if isinstance(device, tf.DeviceSpec):
            device = device.to_string()
        self.device = device

    def _set_device_from_string(self, device_str):
        self.device = device_str


def _get_current_tf_device():
    """Return the explicit device of the current context, otherwise `None`.

    Returns:
        If the current device scope is explicitly set, it returns a string
        with the device (`CPU` or `GPU`). If the scope is not explicitly set,
        it will return `None`.
    """
    graph = get_graph()
    op = _TfDeviceCaptureOp()
    graph._apply_device_functions(op)
    if tf.__internal__.tf2.enabled():
        return tf.DeviceSpec.from_string(op.device)
    else:
        return tf.compat.v1.DeviceSpec.from_string(op.device)


def _is_current_explicit_device(device_type):
    """Check if the current device is explicitly set to `device_type`.

    Args:
        device_type: A string containing `GPU` or `CPU` (case-insensitive).

    Returns:
        A boolean indicating if the current device scope is explicitly set on
        the device type.

    Raises:
        ValueError: If the `device_type` string indicates an unsupported
            device.
    """
    device_type = device_type.upper()
    if device_type not in ["CPU", "GPU"]:
        raise ValueError('`device_type` should be either "CPU" or "GPU".')
    device = _get_current_tf_device()
    return device is not None and device.device_type == device_type.upper()


def _get_available_gpus():
    """Get a list of available GPU devices (formatted as strings).

    Returns:
        A list of available GPU devices.
    """
    if tf.compat.v1.executing_eagerly_outside_functions():
        # Returns names of devices directly.
        return [d.name for d in tf.config.list_logical_devices("GPU")]

    global _LOCAL_DEVICES
    if _LOCAL_DEVICES is None:
        _LOCAL_DEVICES = get_session().list_devices()
    return [x.name for x in _LOCAL_DEVICES if x.device_type == "GPU"]
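
# A minimal usage sketch (`_get_available_gpus` is internal; the public TF2
# equivalent it delegates to is `tf.config.list_logical_devices`):
#
#   gpus = tf.config.list_logical_devices("GPU")
#   # e.g. [LogicalDevice(name='/device:GPU:0', device_type='GPU')]
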
def _has_nchw_support():
"""Check whether the current scope supports NCHW ops.
TensorFlow does not support NCHW on CPU. Therefore we check if we are not
explicitly put on
CPU, and have GPUs available. In this case there will be soft-placing on the
GPU device.
Returns:
bool: if the current scope device placement would support nchw
"""
explicitly_on_cpu = _is_current_explicit_device("CPU")
gpus_available = bool(_get_available_gpus())
return not explicitly_on_cpu and gpus_available
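
# A short note on why this check matters (general background, not specific to
# this file): layers that accept a `data_format` argument can use it to pick
# `channels_first` (NCHW, typically faster with GPU/cuDNN kernels) only when
# placement allows it, and fall back to `channels_last` (NHWC) on CPU.
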
# VARIABLE MANIPULATION
def _constant_to_tensor(x, dtype):
    """Convert the input `x` to a tensor of type `dtype`.

    This is slightly faster than the _to_tensor function, at the cost of
    handling fewer cases.

    Args:
        x: An object to be converted (numpy arrays, floats, ints and lists of
            them).
        dtype: The destination type.

    Returns:
        A tensor.
    """
    return tf.constant(x, dtype=dtype)


def _to_tensor(x, dtype):
    """Convert the input `x` to a tensor of type `dtype`.

    Args:
        x: An object to be converted (numpy array, list, tensors).
        dtype: The destination type.

    Returns:
        A tensor.
    """
    return tf.convert_to_tensor(x, dtype=dtype)
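
# A minimal sketch contrasting the two converters above (values are
# illustrative). `_constant_to_tensor` always builds a new constant, while
# `_to_tensor` passes through inputs that are already tensors of a matching
# dtype:
#
#   t = tf.constant([1.0, 2.0])
#   _to_tensor(t, "float32") is t           # True: passthrough
#   _constant_to_tensor([1, 2], "float32")  # new float32 constant
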