Twitter Recommendation Algorithm

Please note we have force-pushed a new initial commit in order to remove some publicly available Twitter user information. Note that this process may need to be repeated in the future.
twitter-team
2023-03-31 17:36:31 -05:00
commit ef4c5eb65e
5364 changed files with 460239 additions and 0 deletions

@ -0,0 +1,220 @@
from collections import defaultdict
import os

from sklearn.metrics import average_precision_score, roc_auc_score
import tensorflow as tf
import wandb

from toxicity_ml_pipeline.settings.default_settings_abs import LABEL_NAMES
from toxicity_ml_pipeline.settings.default_settings_tox import REMOTE_LOGDIR
from toxicity_ml_pipeline.utils.absv_utils import parse_labeled_data
from toxicity_ml_pipeline.utils.helpers import compute_precision_fixed_recall, execute_command
class NothingCallback(tf.keras.callbacks.Callback):
  """No-op debugging callback that only prints epoch and batch boundaries."""

  def on_epoch_begin(self, epoch, logs=None):
    print("epoch begin ", epoch)

  def on_epoch_end(self, epoch, logs=None):
    print("epoch end ", epoch)

  def on_train_batch_end(self, batch, logs=None):
    print("batch end ", batch)
class ControlledStoppingCheckpointCallback(tf.keras.callbacks.ModelCheckpoint):
  """ModelCheckpoint that additionally stops training at a fixed epoch."""

  def __init__(self, stopping_epoch, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.stopping_epoch = stopping_epoch

  def on_epoch_end(self, epoch, logs=None):
    super().on_epoch_end(epoch, logs)
    if epoch == self.stopping_epoch:
      self.model.stop_training = True
class SyncingTensorBoard(tf.keras.callbacks.TensorBoard):
  """TensorBoard callback that rsyncs the local log directory to a remote
  (GCS) directory after every epoch."""

  def __init__(self, remote_logdir=None, *args, **kwargs):
    super().__init__(*args, **kwargs)
    self.remote_logdir = remote_logdir if remote_logdir is not None else REMOTE_LOGDIR

  def on_epoch_end(self, epoch, logs=None):
    super().on_epoch_end(epoch, logs=logs)
    self.synchronize()

  def synchronize(self):
    base_dir = os.path.dirname(self.log_dir)
    cmd = f"gsutil -m rsync -r {base_dir} {self.remote_logdir}"
    execute_command(cmd)
class GradientLoggingTensorBoard(SyncingTensorBoard):
  """Additionally logs the global gradient norm, computed on a small fixed
  validation batch, every `freq` training batches."""

  def __init__(self, loader, val_data, freq, *args, **kwargs):
    super().__init__(*args, **kwargs)
    val_dataset = loader.get_balanced_dataset(
      training_data=val_data, size_limit=50, return_as_batch=False
    )
    data_args = list(val_dataset.batch(32).take(1))[0]
    self.x_batch, self.y_batch = data_args[0], data_args[1]
    self.freq = freq
    self.counter = 0

  def _log_gradients(self):
    writer = self._train_writer
    with writer.as_default():
      with tf.GradientTape() as tape:
        y_pred = self.model(self.x_batch)
        loss = self.model.compiled_loss(y_true=self.y_batch, y_pred=y_pred)
      gradient_norm = tf.linalg.global_norm(tape.gradient(loss, self.model.trainable_weights))
      tf.summary.scalar("gradient_norm", data=gradient_norm, step=self.counter)
      writer.flush()

  def on_train_batch_end(self, batch, logs=None):
    super().on_train_batch_end(batch, logs=logs)
    self.counter += 1
    if batch % self.freq == 0:
      self._log_gradients()
class AdditionalResultLogger(tf.keras.callbacks.Callback):
  """Computes PR-AUC, ROC-AUC and precision at fixed recall on a held-out set
  and logs the results (plus running maxima) to Weights & Biases."""

  def __init__(
    self,
    data,
    set_,
    fixed_recall=0.85,
    from_logits=False,
    dataset_transform_func=None,
    batch_size=64,
    dual_head=None,
    *args,
    **kwargs,
  ):
    super().__init__(*args, **kwargs)
    self.set_ = set_
    if data is None:
      return

    self.single_head = True
    try:
      self.labels = data.int_label.values
    except AttributeError:
      # Not a DataFrame: fall back to the multi-label dataset interface.
      self.labels = data.to_dataframe()[LABEL_NAMES].values.astype('int')
      self.data = data.to_tf_dataset().map(parse_labeled_data).batch(batch_size)
      self.label_names = LABEL_NAMES
    else:
      self.label_names = ['']
      if dual_head:
        self.label_names = [f'{e}_label' for e in dual_head]
        self.labels = {f'{e}_output': data[f'{e}_label'].values for e in dual_head}
        self.single_head = False
      if dataset_transform_func is None:
        self.data = data.text.values
      else:
        self.data = dataset_transform_func(data, mb_size=batch_size, shuffle=False)
    finally:
      self.metric_kw = {} if len(self.label_names) == 1 else {'average': None}

    self.counter = 0
    self.best_metrics = defaultdict(float)
    self.from_logits = from_logits
    print(f"Loaded callback for {set_}, from_logits: {from_logits}, labels {self.label_names}")

    # Accept the recall either as a fraction in (0, 1] or as a percentage in (1, 100].
    if 1 < fixed_recall <= 100:
      fixed_recall = fixed_recall / 100
    elif not (0 < fixed_recall <= 100):
      raise ValueError("fixed_recall should be in (0, 1] or (1, 100]")
    self.fixed_recall = fixed_recall
    self.batch_size = batch_size
  def compute_precision_fixed_recall(self, labels, preds):
    result, _ = compute_precision_fixed_recall(
      labels=labels, preds=preds, fixed_recall=self.fixed_recall
    )
    return result

  def on_epoch_end(self, epoch, logs=None):
    self.additional_evaluations(step=epoch, eval_time="epoch")

  def on_train_batch_end(self, batch, logs=None):
    self.counter += 1
    if self.counter % 2000 == 0:
      self.additional_evaluations(step=self.counter, eval_time="batch")
  def _binary_evaluations(self, preds, label_name=None, class_index=None):
    mask = None
    curr_labels = self.labels
    if label_name is not None:
      curr_labels = self.labels[label_name]
      if class_index is not None:
        curr_labels = (curr_labels == class_index).astype(int)

    # A label of -1 marks an unannotated example; exclude those from the metrics.
    if -1 in curr_labels:
      mask = curr_labels != -1
      curr_labels = curr_labels[mask]
      preds = preds[mask]

    return {
      f"precision_recall{self.fixed_recall}": self.compute_precision_fixed_recall(
        labels=curr_labels, preds=preds
      ),
      "pr_auc": average_precision_score(y_true=curr_labels, y_score=preds),
      "roc_auc": roc_auc_score(y_true=curr_labels, y_score=preds),
    }

  def _multiclass_evaluations(self, preds):
    pr_auc_l = average_precision_score(y_true=self.labels, y_score=preds, **self.metric_kw)
    roc_auc_l = roc_auc_score(y_true=self.labels, y_score=preds, **self.metric_kw)
    metrics = {}
    for i, label in enumerate(self.label_names):
      metrics[f'pr_auc_{label}'] = pr_auc_l[i]
      metrics[f'roc_auc_{label}'] = roc_auc_l[i]
    return metrics
  def additional_evaluations(self, step, eval_time):
    print("Evaluating ", self.set_, eval_time, step)

    preds = self.model.predict(x=self.data, batch_size=self.batch_size)
    if self.from_logits:
      preds = tf.keras.activations.sigmoid(preds.logits).numpy()

    if self.single_head:
      if len(self.label_names) == 1:
        metrics = self._binary_evaluations(preds)
      else:
        metrics = self._multiclass_evaluations(preds)
    else:
      # Identify the binary head by its output width of 1.
      if preds[0].shape[1] == 1:
        binary_preds, multic_preds = preds[0], preds[1]
      else:
        binary_preds, multic_preds = preds[1], preds[0]

      binary_metrics = self._binary_evaluations(binary_preds, label_name='target_output')
      metrics = {f'{k}_target': v for k, v in binary_metrics.items()}

      # One-vs-rest binary metrics for each class of the multiclass head.
      num_classes = multic_preds.shape[1]
      for class_ in range(num_classes):
        binary_metrics = self._binary_evaluations(
          multic_preds[:, class_], label_name='content_output', class_index=class_
        )
        metrics.update({f'{k}_content_{class_}': v for k, v in binary_metrics.items()})

    for k, v in metrics.items():
      self.best_metrics[f"max_{k}"] = max(v, self.best_metrics[f"max_{k}"])
    self.log_metrics(metrics, step=step, eval_time=eval_time)

  def log_metrics(self, metrics_d, step, eval_time):
    # Validation metrics are accumulated without advancing the W&B step.
    commit = self.set_ != "validation"
    to_report = {self.set_: {**metrics_d, **self.best_metrics}}
    if eval_time == "epoch":
      to_report["epoch"] = step
    wandb.log(to_report, commit=commit)
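
For orientation, here is a minimal sketch of how these callbacks might be combined in a Keras training loop. Everything named below (`model`, `loader`, `train_ds`, `val_df`, the paths, and the hyperparameters) is a hypothetical placeholder, not a value taken from this repository:

# Minimal sketch, not from this repo: wiring the callbacks into model.fit.
# `model`, `loader`, `train_ds`, `val_df` and all paths are placeholders;
# AdditionalResultLogger also assumes wandb.init() was already called.
callbacks = [
  ControlledStoppingCheckpointCallback(
    stopping_epoch=3,                        # halt training after epoch 3
    filepath="/tmp/ckpt/model.{epoch:02d}",  # placeholder checkpoint path
    save_freq="epoch",
  ),
  GradientLoggingTensorBoard(
    loader=loader, val_data=val_df, freq=1000, log_dir="/tmp/logs"
  ),
  AdditionalResultLogger(data=val_df, set_="validation"),
]
model.fit(train_ds, epochs=10, callbacks=callbacks)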

@ -0,0 +1,56 @@
import tensorflow as tf

from keras import backend
from keras.utils import losses_utils, tf_utils


def inv_kl_divergence(y_true, y_pred):
  """KL divergence with the arguments reversed: KL(y_pred || y_true)."""
  y_pred = tf.convert_to_tensor(y_pred)
  y_true = tf.cast(y_true, y_pred.dtype)
  y_true = backend.clip(y_true, backend.epsilon(), 1)
  y_pred = backend.clip(y_pred, backend.epsilon(), 1)
  return tf.reduce_sum(y_pred * tf.math.log(y_pred / y_true), axis=-1)


def masked_bce(y_true, y_pred):
  """Binary cross-entropy that ignores examples labeled -1."""
  y_true = tf.cast(y_true, dtype=tf.float32)
  mask = y_true != -1
  return tf.keras.metrics.binary_crossentropy(
    tf.boolean_mask(y_true, mask), tf.boolean_mask(y_pred, mask)
  )


class LossFunctionWrapper(tf.keras.losses.Loss):
  """Wraps a plain loss function into a Keras Loss object (mirrors the
  internal Keras wrapper of the same name)."""

  def __init__(self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs):
    super().__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    if tf.is_tensor(y_pred) and tf.is_tensor(y_true):
      y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true)
    ag_fn = tf.__internal__.autograph.tf_convert(
      self.fn, tf.__internal__.autograph.control_status_ctx()
    )
    return ag_fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    config = {}
    for k, v in self._fn_kwargs.items():
      config[k] = backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
    base_config = super().get_config()
    return dict(list(base_config.items()) + list(config.items()))


class InvKLD(LossFunctionWrapper):
  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='inv_kl_divergence'):
    super().__init__(inv_kl_divergence, name=name, reduction=reduction)


class MaskedBCE(LossFunctionWrapper):
  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='masked_bce'):
    super().__init__(masked_bce, name=name, reduction=reduction)
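
A brief usage sketch, assuming a hypothetical dual-head model whose outputs are named `target_output` and `content_output` (the names the callbacks file expects); `masked_bce` lets partially annotated batches contribute loss only on their labeled examples:

# Sketch only: `model` is a placeholder two-headed Keras model.
model.compile(
  optimizer="adam",
  loss={"target_output": MaskedBCE(), "content_output": InvKLD()},
)

# masked_bce skips the -1-labeled (unannotated) example:
y_true = tf.constant([1.0, -1.0, 0.0])
y_pred = tf.constant([0.9, 0.5, 0.2])
print(masked_bce(y_true, y_pred))  # BCE averaged over the two labeled examples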

@ -0,0 +1,44 @@
from typing import Callable

import tensorflow as tf


class WarmUp(tf.keras.optimizers.schedules.LearningRateSchedule):
  """Polynomial (linear by default) learning-rate warmup that hands off to a
  decay schedule once `warmup_steps` is reached."""

  def __init__(
    self,
    initial_learning_rate: float,
    decay_schedule_fn: Callable,
    warmup_steps: int,
    power: float = 1.0,
    name: str = "",
  ):
    super().__init__()
    self.initial_learning_rate = initial_learning_rate
    self.warmup_steps = warmup_steps
    self.power = power
    self.decay_schedule_fn = decay_schedule_fn
    self.name = name

  def __call__(self, step):
    with tf.name_scope(self.name or "WarmUp") as name:
      # Ramp the rate up over `warmup_steps`, then defer to the decay schedule.
      global_step_float = tf.cast(step, tf.float32)
      warmup_steps_float = tf.cast(self.warmup_steps, tf.float32)
      warmup_percent_done = global_step_float / warmup_steps_float
      warmup_learning_rate = self.initial_learning_rate * tf.math.pow(
        warmup_percent_done, self.power
      )
      return tf.cond(
        global_step_float < warmup_steps_float,
        lambda: warmup_learning_rate,
        lambda: self.decay_schedule_fn(step - self.warmup_steps),
        name=name,
      )

  def get_config(self):
    return {
      "initial_learning_rate": self.initial_learning_rate,
      "decay_schedule_fn": self.decay_schedule_fn,
      "warmup_steps": self.warmup_steps,
      "power": self.power,
      "name": self.name,
    }
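
As an illustration, WarmUp can wrap any Keras decay schedule. The sketch below, with assumed (arbitrary) hyperparameters, ramps the learning rate linearly for 1,000 steps and then hands off to a cosine decay:

# Sketch with assumed hyperparameters: linear warmup into cosine decay.
decay = tf.keras.optimizers.schedules.CosineDecay(
  initial_learning_rate=1e-5, decay_steps=9_000
)
lr_schedule = WarmUp(
  initial_learning_rate=1e-5,
  decay_schedule_fn=decay,
  warmup_steps=1_000,
)
optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)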