
Commit 2aeb367

Merge commit for internal changes
2 parents: 2f2cf33 + 034aaaa


42 files changed (+3848, −1201 lines)

build_docs.py

Lines changed: 1 addition & 0 deletions
@@ -64,6 +64,7 @@ def main(_):
       'tfl': ['python'],
       'tfl.aggregation_layer': ['Aggregation'],
       'tfl.categorical_calibration_layer': ['CategoricalCalibration'],
+      'tfl.kronecker_factored_lattice_layer': ['KroneckerFactoredLattice'],
       'tfl.lattice_layer': ['Lattice'],
       'tfl.linear_layer': ['Linear'],
       'tfl.pwl_calibration_layer': ['PWLCalibration'],
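
For context, the entry above registers the newly added `tfl.layers.KroneckerFactoredLattice` layer with the docs generator. A minimal usage sketch, assuming a Keras functional model; the parameter values and input shape are illustrative assumptions, not part of this commit:

import tensorflow as tf
import tensorflow_lattice as tfl

# Assumed setup: three features already calibrated into the lattice's
# input range. KroneckerFactoredLattice shares one lattice size across
# all dimensions and factorizes its parameters, so it scales to higher
# input dimensions than a full tfl.layers.Lattice.
inputs = tf.keras.layers.Input(shape=(3,))
outputs = tfl.layers.KroneckerFactoredLattice(
    lattice_sizes=2,  # illustrative; one size shared by all dimensions
    num_terms=2)(inputs)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(loss='mse', optimizer=tf.keras.optimizers.Adam(0.01))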

docs/tutorials/aggregate_function_models.ipynb

Lines changed: 1 addition & 1 deletion
@@ -476,7 +476,7 @@
    "source": [
     "## Aggregate Function Model\n",
     "\n",
-    "To construct a TFL premade model, first construct a model configuration from [tfl.configs](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs). An aggregate function model is constructed using the [tfl.configs.AggregateFunctionConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/AggregateFunctionConfig). It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension. This is then followed by an optional output piecewise-lienar calibration."
+    "To construct a TFL premade model, first construct a model configuration from [tfl.configs](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs). An aggregate function model is constructed using the [tfl.configs.AggregateFunctionConfig](https://www.tensorflow.org/lattice/api_docs/python/tfl/configs/AggregateFunctionConfig). It applies piecewise-linear and categorical calibration, followed by a lattice model on each dimension of the ragged input. It then applies an aggregation layer over the output for each dimension. This is then followed by an optional output piecewise-linear calibration."
    ]
   },
   {
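
A minimal sketch of the flow the corrected paragraph describes; `feature_configs` is a hypothetical list of `tfl.configs.FeatureConfig` objects you would build for your own ragged inputs, and the keyword values are illustrative:

import tensorflow_lattice as tfl

# `feature_configs` is a hypothetical, user-built list of
# tfl.configs.FeatureConfig objects describing each ragged input dimension.
model_config = tfl.configs.AggregateFunctionConfig(
    feature_configs=feature_configs,
    # The optional output piecewise-linear calibration mentioned above.
    output_calibration=True)
model = tfl.premade.AggregateFunction(model_config)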

docs/tutorials/canned_estimators.ipynb

Lines changed: 71 additions & 16 deletions
@@ -12,7 +12,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "cellView": "form",
     "colab": {},
@@ -101,7 +101,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -125,7 +125,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "cellView": "both",
     "colab": {},
@@ -136,6 +136,7 @@
    "source": [
     "import tensorflow as tf\n",
     "\n",
+    "import copy\n",
     "import logging\n",
     "import numpy as np\n",
     "import pandas as pd\n",
@@ -157,7 +158,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "cellView": "both",
     "colab": {},
@@ -190,7 +191,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "cellView": "both",
     "colab": {},
@@ -219,7 +220,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -285,7 +286,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -341,7 +342,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -450,7 +451,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -499,7 +500,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -559,7 +560,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -568,7 +569,7 @@
    "outputs": [],
    "source": [
     "# This is random lattice ensemble model with separate calibration:\n",
-    "# model output is the average output of separatly calibrated lattices.\n",
+    "# model output is the average output of separately calibrated lattices.\n",
     "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
     "    feature_configs=feature_configs,\n",
     "    num_lattices=5,\n",
@@ -589,6 +590,60 @@
     "tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
    ]
   },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "colab_type": "text",
+    "id": "7uyO8s97FGJM"
+   },
+   "source": [
+    "### RTL Layer Random Lattice Ensemble\n",
+    "\n",
+    "The following model config uses a `tfl.layers.RTL` layer that uses a random subset of features for each lattice. We note that `tfl.layers.RTL` only supports monotonicity constraints and must have the same lattice size for all features and no per-feature regularization. Note that using a `tfl.layers.RTL` layer lets you scale to much larger ensembles than using separate `tfl.layers.Lattice` instances."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "colab": {},
+    "colab_type": "code",
+    "id": "8v7dKg-FF7iz"
+   },
+   "outputs": [],
+   "source": [
+    "# Make sure our feature configs have the same lattice size, no per-feature\n",
+    "# regularization, and only monotonicity constraints.\n",
+    "rtl_layer_feature_configs = copy.deepcopy(feature_configs)\n",
+    "for feature_config in rtl_layer_feature_configs:\n",
+    "  feature_config.lattice_size = 2\n",
+    "  feature_config.unimodality = 'none'\n",
+    "  feature_config.reflects_trust_in = None\n",
+    "  feature_config.dominates = None\n",
+    "  feature_config.regularizer_configs = None\n",
+    "# This is RTL layer ensemble model with separate calibration:\n",
+    "# model output is the average output of separately calibrated lattices.\n",
+    "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
+    "    lattices='rtl_layer',\n",
+    "    feature_configs=rtl_layer_feature_configs,\n",
+    "    num_lattices=5,\n",
+    "    lattice_rank=3)\n",
+    "# A CannedClassifier is constructed from the given model config.\n",
+    "estimator = tfl.estimators.CannedClassifier(\n",
+    "    feature_columns=feature_columns,\n",
+    "    model_config=model_config,\n",
+    "    feature_analysis_input_fn=feature_analysis_input_fn,\n",
+    "    optimizer=tf.keras.optimizers.Adam(LEARNING_RATE),\n",
+    "    config=tf.estimator.RunConfig(tf_random_seed=42))\n",
+    "estimator.train(input_fn=train_input_fn)\n",
+    "results = estimator.evaluate(input_fn=test_input_fn)\n",
+    "print('Random ensemble test AUC: {}'.format(results['auc']))\n",
+    "saved_model_path = estimator.export_saved_model(estimator.model_dir,\n",
+    "                                                serving_input_fn)\n",
+    "model_graph = tfl.estimators.get_model_graph(saved_model_path)\n",
+    "tfl.visualization.draw_model_graph(model_graph, calibrator_dpi=15)"
+   ]
+  },
   {
    "cell_type": "markdown",
    "metadata": {
@@ -605,7 +660,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -634,7 +689,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",
@@ -643,7 +698,7 @@
    "outputs": [],
    "source": [
     "# This is Crystals ensemble model with separate calibration: model output is\n",
-    "# the average output of separatly calibrated lattices.\n",
+    "# the average output of separately calibrated lattices.\n",
     "model_config = tfl.configs.CalibratedLatticeEnsembleConfig(\n",
     "    feature_configs=feature_configs,\n",
     "    lattices='crystals',\n",
@@ -680,7 +735,7 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 0,
+   "execution_count": null,
    "metadata": {
     "colab": {},
     "colab_type": "code",

docs/tutorials/custom_estimators.ipynb

Lines changed: 1 addition & 1 deletion
@@ -303,7 +303,7 @@
     "\n",
     "There are several ways to create a custom estimator. Here we will construct a `model_fn` that calls a Keras model on the parsed input tensors. To parse the input features, you can use `tf.feature_column.input_layer`, `tf.keras.layers.DenseFeatures`, or `tfl.estimators.transform_features`. If you use the latter, you will not need to wrap categorical features with dense feature columns, and the resulting tensors will not be concatenated, which makes it easier to use the features in the calibration layers.\n",
     "\n",
-    "To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. When then use the Keras model to create the custom estimator.\n"
+    "To construct a model, you can mix and match TFL layers or any other Keras layers. Here we create a calibrated lattice Keras model out of TFL layers and impose several monotonicity constraints. We then use the Keras model to create the custom estimator.\n"
    ]
   },
   {
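
A minimal sketch of the `model_fn` pattern this cell describes, assuming a prebuilt Keras model `keras_model` and a `feature_columns` list (both hypothetical names here); the TF1-style train op is the standard estimator recipe rather than the tutorial's exact code:

import tensorflow as tf
import tensorflow_lattice as tfl

def model_fn(features, labels, mode, config):
  """Hypothetical model_fn calling a Keras model on parsed inputs."""
  del config  # unused in this sketch
  # transform_features leaves per-feature tensors unconcatenated, which
  # makes it easy to route each one into its own calibration layer.
  transformed = tfl.estimators.transform_features(features, feature_columns)
  logits = keras_model(
      transformed, training=(mode == tf.estimator.ModeKeys.TRAIN))
  if mode == tf.estimator.ModeKeys.PREDICT:
    return tf.estimator.EstimatorSpec(mode, predictions={'logits': logits})
  loss = tf.reduce_mean(
      tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits))
  train_op = tf.compat.v1.train.AdamOptimizer(0.01).minimize(
      loss, global_step=tf.compat.v1.train.get_global_step())
  return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

estimator = tf.estimator.Estimator(model_fn=model_fn)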

docs/tutorials/keras_layers.ipynb

Lines changed: 4 additions & 4 deletions
@@ -237,7 +237,7 @@
    "id": "W3DnEKWvQYXm"
   },
   "source": [
-    "We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in paralel in order to be able to create a Sequential model.\n"
+    "We use a `tfl.layers.ParallelCombination` layer to group together calibration layers which have to be executed in parallel in order to be able to create a Sequential model.\n"
   ]
  },
  {
@@ -260,7 +260,7 @@
    "id": "BPZsSUZiQiwc"
   },
   "source": [
-    "We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration` and for categorical features we use `tfl.layers.CategoricalCalibration`."
+    "We create a calibration layer for each feature and add it to the parallel combination layer. For numeric features we use `tfl.layers.PWLCalibration`, and for categorical features we use `tfl.layers.CategoricalCalibration`."
   ]
  },
  {
@@ -282,7 +282,7 @@
     "        training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n",
     "    # You need to ensure that input keypoints have same dtype as layer input.\n",
     "    # You can do it by setting dtype here or by providing keypoints in such\n",
-    "    # format which will be converted to deisred tf.dtype by default.\n",
+    "    # format which will be converted to desired tf.dtype by default.\n",
     "    dtype=tf.float32,\n",
     "    # Output range must correspond to expected lattice input range.\n",
     "    output_min=0.0,\n",
@@ -542,7 +542,7 @@
     "        training_data_df['age'].min(), training_data_df['age'].max(), num=5),\n",
     "    # You need to ensure that input keypoints have same dtype as layer input.\n",
     "    # You can do it by setting dtype here or by providing keypoints in such\n",
-    "    # format which will be converted to deisred tf.dtype by default.\n",
+    "    # format which will be converted to desired tf.dtype by default.\n",
     "    dtype=tf.float32,\n",
     "    # Output range must correspond to expected lattice input range.\n",
     "    output_min=0.0,\n",
