gigl.src.common.models.layers.SoftmaxLoss
==========================================

.. currentmodule:: gigl.src.common.models.layers.loss

.. autoclass:: SoftmaxLoss
   :members:
   :show-inheritance:
   :special-members:
   :inherited-members:

   .. rubric:: Methods

   .. autosummary::
      :nosignatures:

      ~SoftmaxLoss.__init__
      ~SoftmaxLoss.add_module
      ~SoftmaxLoss.apply
      ~SoftmaxLoss.bfloat16
      ~SoftmaxLoss.buffers
      ~SoftmaxLoss.children
      ~SoftmaxLoss.compile
      ~SoftmaxLoss.cpu
      ~SoftmaxLoss.cuda
      ~SoftmaxLoss.double
      ~SoftmaxLoss.eval
      ~SoftmaxLoss.extra_repr
      ~SoftmaxLoss.float
      ~SoftmaxLoss.forward
      ~SoftmaxLoss.get_buffer
      ~SoftmaxLoss.get_extra_state
      ~SoftmaxLoss.get_parameter
      ~SoftmaxLoss.get_submodule
      ~SoftmaxLoss.half
      ~SoftmaxLoss.ipu
      ~SoftmaxLoss.load_state_dict
      ~SoftmaxLoss.modules
      ~SoftmaxLoss.mtia
      ~SoftmaxLoss.named_buffers
      ~SoftmaxLoss.named_children
      ~SoftmaxLoss.named_modules
      ~SoftmaxLoss.named_parameters
      ~SoftmaxLoss.parameters
      ~SoftmaxLoss.register_backward_hook
      ~SoftmaxLoss.register_buffer
      ~SoftmaxLoss.register_forward_hook
      ~SoftmaxLoss.register_forward_pre_hook
      ~SoftmaxLoss.register_full_backward_hook
      ~SoftmaxLoss.register_full_backward_pre_hook
      ~SoftmaxLoss.register_load_state_dict_post_hook
      ~SoftmaxLoss.register_load_state_dict_pre_hook
      ~SoftmaxLoss.register_module
      ~SoftmaxLoss.register_parameter
      ~SoftmaxLoss.register_state_dict_post_hook
      ~SoftmaxLoss.register_state_dict_pre_hook
      ~SoftmaxLoss.requires_grad_
      ~SoftmaxLoss.set_extra_state
      ~SoftmaxLoss.set_submodule
      ~SoftmaxLoss.share_memory
      ~SoftmaxLoss.state_dict
      ~SoftmaxLoss.to
      ~SoftmaxLoss.to_empty
      ~SoftmaxLoss.train
      ~SoftmaxLoss.type
      ~SoftmaxLoss.xpu
      ~SoftmaxLoss.zero_grad
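
.. rubric:: Example

A minimal usage sketch. ``SoftmaxLoss`` is a standard ``torch.nn.Module``
subclass, so the device handling below uses the inherited methods listed
above; the constructor arguments and the ``forward`` signature shown here
are assumptions for illustration only (the authoritative signature is the
one documented by the ``forward`` entry above).

.. code-block:: python

   import torch

   from gigl.src.common.models.layers.loss import SoftmaxLoss

   # Hypothetical: instantiate the loss module (constructor args assumed
   # to be optional; check the __init__ signature documented above).
   loss_fn = SoftmaxLoss()

   # Inherited nn.Module behavior: move the module to the available device.
   device = "cuda" if torch.cuda.is_available() else "cpu"
   loss_fn = loss_fn.to(device)

   # Hypothetical inputs: similarity scores for positive pairs and for
   # sampled negative candidates. Shapes are illustrative assumptions.
   pos_scores = torch.randn(32, device=device, requires_grad=True)
   neg_scores = torch.randn(32, 10, device=device, requires_grad=True)

   # Hypothetical forward call; the real argument names and shapes are
   # defined in gigl.src.common.models.layers.loss.
   loss = loss_fn(pos_scores, neg_scores)
   loss.backward()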