|
1 | 1 | """Prediction heads built on top of the shared AlphaFold representations. |
2 | 2 |
|
3 | 3 | The classes in this module project internal sequence or pair features into the |
4 | | -single representation, pLDDT logits, and distogram logits used by the model |
5 | | -output dictionary and downstream loss computation. |
| 4 | +single representation, pLDDT logits, distogram logits, masked-MSA logits, and |
| 5 | +an optional predicted-TM head used for confidence-style reporting. |
6 | 6 | """ |
7 | 7 |
|
8 | 8 | import torch |
@@ -75,3 +75,92 @@ def __init__(self, c_m=256, num_classes=23): |
75 | 75 | def forward(self, m): |
76 | 76 | logits = self.linear(self.ln(m)) |
77 | 77 | return logits |
| 78 | + |
| 79 | + |
| 80 | +def compute_predicted_tm_score( |
| 81 | + tm_logits: torch.Tensor, |
| 82 | + *, |
| 83 | + residue_mask: torch.Tensor | None = None, |
| 84 | + bin_centers: torch.Tensor | None = None, |
| 85 | + eps: float = 1e-8, |
| 86 | +) -> torch.Tensor: |
| 87 | + """AlphaFold pTM lower bound from pairwise error logits. |
| 88 | +
|
| 89 | + Parameters |
| 90 | + ---------- |
| 91 | + tm_logits : [B, L, L, num_bins] |
| 92 | + Logits over aligned-error bins derived from the final pair representation. |
| 93 | + residue_mask : [B, L], optional |
| 94 | + Valid residues to include in the domain / chain subset. |
| 95 | + bin_centers : [num_bins], optional |
| 96 | + Representative error values for each aligned-error bin. |
| 97 | + eps : float |
| 98 | + Small numerical constant. |
| 99 | + """ |
| 100 | + |
| 101 | + if tm_logits.ndim != 4: |
| 102 | + raise ValueError(f"tm_logits must have shape [B, L, L, C], got {tuple(tm_logits.shape)}") |
| 103 | + |
| 104 | + batch_size, length, _, num_bins = tm_logits.shape |
| 105 | + if bin_centers is None: |
| 106 | + if num_bins <= 1: |
| 107 | + bin_width = 0.5 |
| 108 | + else: |
| 109 | + bin_width = 31.5 / float(num_bins - 1) |
| 110 | + bin_centers = torch.arange(num_bins, device=tm_logits.device, dtype=tm_logits.dtype) |
| 111 | + bin_centers = bin_width * (bin_centers + 0.5) |
| 112 | + else: |
| 113 | + bin_centers = bin_centers.to(device=tm_logits.device, dtype=tm_logits.dtype) |
| 114 | + if bin_centers.numel() != num_bins: |
| 115 | + raise ValueError( |
| 116 | + f"bin_centers must have {num_bins} entries, got {bin_centers.numel()}" |
| 117 | + ) |
| 118 | + |
| 119 | + if residue_mask is None: |
| 120 | + residue_mask = torch.ones(batch_size, length, device=tm_logits.device, dtype=tm_logits.dtype) |
| 121 | + else: |
| 122 | + residue_mask = residue_mask.to(device=tm_logits.device, dtype=tm_logits.dtype) |
| 123 | + |
| 124 | + num_res = residue_mask.sum(dim=-1).clamp_min(1.0) |
| 125 | + d0 = 1.24 * torch.clamp(num_res, min=19.0).sub(15.0).pow(1.0 / 3.0) - 1.8 |
| 126 | + d0 = d0.clamp_min(0.5) |
| 127 | + |
| 128 | + probs = F.softmax(tm_logits, dim=-1) |
| 129 | + tm_kernel = 1.0 / (1.0 + (bin_centers.view(1, 1, 1, -1) / (d0.view(-1, 1, 1, 1) + eps)) ** 2) |
| 130 | + expected_tm = (probs * tm_kernel).sum(dim=-1) |
| 131 | + |
| 132 | + per_alignment = (expected_tm * residue_mask[:, None, :]).sum(dim=-1) / num_res.view(-1, 1) |
| 133 | + per_alignment = per_alignment.masked_fill(residue_mask <= 0, float("-inf")) |
| 134 | + |
| 135 | + ptm = per_alignment.max(dim=-1).values |
| 136 | + has_valid = residue_mask.sum(dim=-1) > 0 |
| 137 | + ptm = torch.where(has_valid, ptm, torch.zeros_like(ptm)) |
| 138 | + return ptm |
| 139 | + |
| 140 | + |
class TMHead(nn.Module):
    """Aligned-error head: pair representation -> error-bin logits and pTM.

    Projects the pair representation ``z`` into ``num_bins`` logits per residue
    pair and reduces them to a per-example predicted TM score.
    """

    def __init__(self, c_z=128, num_bins=64, max_error=31.5):
        super().__init__()
        self.num_bins = int(num_bins)
        self.max_error = float(max_error)
        self.ln = nn.LayerNorm(c_z)
        self.linear = nn.Linear(c_z, self.num_bins)

        # Evenly spaced bin centers over [0, max_error]; degenerate single-bin
        # heads fall back to a 0.5-wide bin. Kept as a non-persistent buffer so
        # it follows the module's device without entering the state dict.
        if self.num_bins > 1:
            step = self.max_error / float(self.num_bins - 1)
        else:
            step = 0.5
        centers = step * (torch.arange(self.num_bins, dtype=torch.float32) + 0.5)
        self.register_buffer("bin_centers", centers, persistent=False)

    def compute_ptm(self, tm_logits, residue_mask=None):
        """Reduce pairwise error logits to a scalar pTM per batch element."""
        return compute_predicted_tm_score(
            tm_logits, residue_mask=residue_mask, bin_centers=self.bin_centers
        )

    def forward(self, z, residue_mask=None):
        """Return ``(logits, ptm)`` for pair representation ``z`` [B, L, L, c_z]."""
        normed = self.ln(z)
        logits = self.linear(normed)
        return logits, self.compute_ptm(logits, residue_mask=residue_mask)
0 commit comments