class BPRLoss(nn.Module):
    r"""This criterion computes the Bayesian Personalized Ranking (BPR) loss
    between the positive scores and the negative scores.

    Parameters:
        ``alpha`` (``float``, optional): The weight for the positive scores in the BPR loss. Defaults to ``1.0``.
        ``beta`` (``float``, optional): The weight for the negative scores in the BPR loss. Defaults to ``1.0``.
        ``activation`` (``str``, optional): The activation function to use can be one of
            ``"sigmoid_then_log"``, ``"softplus"``. Defaults to ``"sigmoid_then_log"``.
    """

    def __init__(
        self,
        alpha: float = 1.0,
        beta: float = 1.0,
        activation: str = "sigmoid_then_log",
    ):
        super().__init__()
        # Validate with a real exception rather than ``assert``: assertions are
        # stripped under ``python -O`` and would silently accept a bad name.
        if activation not in ("sigmoid_then_log", "softplus"):
            raise ValueError(
                "activation function of BPRLoss must be sigmoid_then_log or softplus."
            )
        self.activation = activation
        self.alpha = alpha
        self.beta = beta

    def forward(self, pos_scores: torch.Tensor, neg_scores: torch.Tensor) -> torch.Tensor:
        r"""The forward function of BPRLoss.

        Parameters:
            ``pos_scores`` (``torch.Tensor``): The positive scores.
            ``neg_scores`` (``torch.Tensor``): The negative scores.
        """
        # Weighted score margin; hoisted so both branches share one expression.
        margin = self.alpha * pos_scores - self.beta * neg_scores
        if self.activation == "sigmoid_then_log":
            # -log(sigmoid(margin)) — mathematically equal to softplus(-margin).
            loss = -margin.sigmoid().log()
        elif self.activation == "softplus":
            # softplus(beta*neg - alpha*pos) == softplus(-margin).
            loss = F.softplus(-margin)
        else:
            # Unreachable given __init__ validation; kept as a defensive guard.
            raise NotImplementedError
        return loss.mean()