elektronn3.modules.axial_attention module

class elektronn3.modules.axial_attention.AxialAttention(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x)[source]
class elektronn3.modules.axial_attention.AxialImageTransformer(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x)[source]
class elektronn3.modules.axial_attention.AxialPositionalEmbedding(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x)[source]
class elektronn3.modules.axial_attention.Deterministic(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(*args, record_rng=False, set_rng=False, **kwargs)[source]
record_rng(*args)[source]
class elektronn3.modules.axial_attention.IrreversibleBlock(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x, f_args, g_args)[source]
class elektronn3.modules.axial_attention.PermuteToFrom(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x, **kwargs)[source]
class elektronn3.modules.axial_attention.ReversibleBlock(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

backward_pass(y, dy, f_args={}, g_args={})[source]
forward(x, f_args={}, g_args={})[source]
class elektronn3.modules.axial_attention.ReversibleSequence(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x, arg_route=(True, True), **kwargs)[source]
class elektronn3.modules.axial_attention.Rezero(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x)[source]
class elektronn3.modules.axial_attention.SelfAttention(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x, kv=None)[source]
class elektronn3.modules.axial_attention.Sequential(*args: Any, **kwargs: Any)[source]

Bases: torch.nn.Module

forward(x)[source]
elektronn3.modules.axial_attention.calculate_permutations(num_dimensions, emb_dim)[source]
elektronn3.modules.axial_attention.exists(val)[source]
elektronn3.modules.axial_attention.map_el_ind(arr, ind)[source]
elektronn3.modules.axial_attention.sort_and_return_indices(arr)[source]