runtime:
  # Automatic mixed precision is disabled for this run.
  enable_amp: false

training:
  save_dir: "/tmp/model"
  num_train_steps: 100000
  checkpoint_every_n: 100000
  train_log_every_n: 10
  num_eval_steps: 1000
  eval_log_every_n: 500
  eval_timeout_in_s: 10000
  num_epochs: 5

model:
  # Optimizer for the per-relation translation parameters: SGD with a constant learning rate.
  translation_optimizer:
    sgd:
      lr: 0.05
    learning_rate:
      constant: 0.05

  # Entity embedding tables, each with its own SGD optimizer and constant learning rate.
  embeddings:
    tables:
      - name: user
        num_embeddings: 424_241
        embedding_dim: 4
        data_type: fp32
        optimizer:
          sgd:
            lr: 0.01
          learning_rate:
            constant: 0.01
      - name: tweet
        num_embeddings: 72_543
        embedding_dim: 4
        data_type: fp32
        optimizer:
          sgd:
            lr: 0.005
          learning_rate:
            constant: 0.005

  # User-tweet edge types, all scored with the translation operator.
  relations:
    - name: fav
      lhs: user
      rhs: tweet
      operator: translation
    - name: reply
      lhs: user
      rhs: tweet
      operator: translation
    - name: retweet
      lhs: user
      rhs: tweet
      operator: translation
    - name: magic_recs
      lhs: user
      rhs: tweet
      operator: translation

train_data:
  data_root: "gs://follows_tml_01/tweet_eng/2023-01-23/large/edges/*"
  per_replica_batch_size: 500
  global_negatives: 0
  in_batch_negatives: 10
  limit: 9990

# Validation reads from the same edge glob, offset past the training limit so the splits do not overlap.
validation_data:
  data_root: "gs://follows_tml_01/tweet_eng/2023-01-23/large/edges/*"
  per_replica_batch_size: 500
  global_negatives: 0
  in_batch_negatives: 10
  limit: 10
  offset: 9990