stage and semaphore overhaul (partial)

wishu
2022-06-08 10:00:12 +10:00
parent b40da768cf
commit 3788819b5c
6 changed files with 461 additions and 1826 deletions

View File

@@ -1,7 +1,7 @@
 package mhfpacket

-import (
-	"errors"
+import (
+	"errors"
 	"github.com/Solenataris/Erupe/network/clientctx"
 	"github.com/Solenataris/Erupe/network"
@@ -9,7 +9,14 @@ import (
 )

 // MsgMhfTransitMessage represents the MSG_MHF_TRANSIT_MESSAGE
-type MsgMhfTransitMessage struct{}
+type MsgMhfTransitMessage struct {
+	AckHandle uint32
+	Unk0      uint8
+	Unk1      uint8
+	Unk2      uint16
+	Unk3      uint16
+	TargetID  uint32
+}

 // Opcode returns the ID associated with this packet type.
 func (m *MsgMhfTransitMessage) Opcode() network.PacketID {
@@ -18,7 +25,13 @@ func (m *MsgMhfTransitMessage) Opcode() network.PacketID {
 // Parse parses the packet from binary
 func (m *MsgMhfTransitMessage) Parse(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
-	return errors.New("NOT IMPLEMENTED")
+	m.AckHandle = bf.ReadUint32()
+	m.Unk0 = bf.ReadUint8()
+	m.Unk1 = bf.ReadUint8()
+	m.Unk2 = bf.ReadUint16()
+	m.Unk3 = bf.ReadUint16()
+	m.TargetID = bf.ReadUint32()
+	return nil
 }

 // Build builds a binary packet from the current data.
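
Note: the commit fills in Parse but, judging by the trailing context line, leaves Build alone. For reference, a Build that mirrors the new Parse would write the same fields in the same order. A minimal sketch, not part of this diff; it assumes byteframe's WriteUint* methods, the write-side counterparts of the ReadUint* calls above:

func (m *MsgMhfTransitMessage) Build(bf *byteframe.ByteFrame, ctx *clientctx.ClientContext) error {
	// Field order must match Parse exactly for a lossless round-trip.
	bf.WriteUint32(m.AckHandle)
	bf.WriteUint8(m.Unk0)
	bf.WriteUint8(m.Unk1)
	bf.WriteUint16(m.Unk2)
	bf.WriteUint16(m.Unk3)
	bf.WriteUint32(m.TargetID)
	return nil
}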

View File

@@ -183,13 +183,12 @@ func logoutPlayer(s *Session) {
 		return
 	}
-	s.stage.RLock()
-	for client := range s.stage.clients {
-		client.QueueSendMHF(&mhfpacket.MsgSysDeleteUser{
-			CharID: s.charID,
-		})
-	}
-	s.stage.RUnlock()
+	s.server.BroadcastMHF(&mhfpacket.MsgSysDeleteUser{
+		CharID: s.charID,
+	}, s)
 	delete(s.server.sessions, s.rawConn)
 	s.rawConn.Close()
 	if s.server.erupeConfig.DevModeOptions.ServerName != "" {
 		_, err := s.server.db.Exec("UPDATE servers SET current_players=$1 WHERE server_name=$2", uint32(len(s.server.sessions)), s.server.erupeConfig.DevModeOptions.ServerName)
@@ -198,13 +197,16 @@ func logoutPlayer(s *Session) {
 		}
 	}
-	removeSessionFromStage(s)
-	if _, exists := s.server.semaphore["hs_l0u3B51J9k3"]; exists {
-		if _, ok := s.server.semaphore["hs_l0u3B51J9k3"].reservedClientSlots[s.charID]; ok {
-			removeSessionFromSemaphore(s)
-		}
-	}
+	s.server.Lock()
+	for _, stage := range s.server.stages {
+		if _, exists := stage.reservedClientSlots[s.charID]; exists {
+			delete(stage.reservedClientSlots, s.charID)
+		}
+	}
+	s.server.Unlock()
+	removeSessionFromSemaphore(s)
+	removeSessionFromStage(s)
 	var timePlayed int
 	err := s.server.db.QueryRow("SELECT time_played FROM characters WHERE id = $1", s.charID).Scan(&timePlayed)
@@ -213,7 +215,7 @@ func logoutPlayer(s *Session) {
 	var rpGained int
-	if s.rights == 0x08091e4e || s.rights == 0x08091e0e { // N Course
+	if s.rights > 0x40000000 { // N Course
 		rpGained = timePlayed / 900
 		timePlayed = timePlayed % 900
 	} else {
@@ -326,7 +328,13 @@ func handleMsgSysRightsReload(s *Session, p mhfpacket.MHFPacket) {
 	doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
 }

-func handleMsgMhfTransitMessage(s *Session, p mhfpacket.MHFPacket) {}
+func handleMsgMhfTransitMessage(s *Session, p mhfpacket.MHFPacket) {
+	pkt := p.(*mhfpacket.MsgMhfTransitMessage)
+	// TODO: figure out what this is supposed to return
+	// probably what world+land the targeted character is on?
+	// stubbed response will just say user not found
+	doAckBufSucceed(s, pkt.AckHandle, make([]byte, 4))
+}

 func handleMsgCaExchangeItem(s *Session, p mhfpacket.MHFPacket) {}
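
A note on the N Course change in logoutPlayer above: the old check matched two specific rights values, while the new one treats any rights word above 0x40000000 as an N Course member. The accrual math itself is unchanged: one RP per 900 seconds (15 minutes) of play, with the remainder written back so partial intervals carry over. A standalone illustration with hypothetical numbers:

package main

import "fmt"

func main() {
	// 3050s banked since the last grant: 3 full 15-minute blocks plus 350s.
	timePlayed := 3050
	rpGained := timePlayed / 900  // 3 RP awarded
	timePlayed = timePlayed % 900 // 350s carried into the next session
	fmt.Println(rpGained, timePlayed) // 3 350
}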

File diff suppressed because it is too large

View File

@@ -2,10 +2,22 @@ package channelserver
 import (
 	"fmt"
+	"strings"

 	"github.com/Solenataris/Erupe/network/mhfpacket"
 )

+func removeSessionFromSemaphore(s *Session) {
+	s.server.semaphoreLock.Lock()
+	for _, semaphore := range s.server.semaphore {
+		if _, exists := semaphore.clients[s]; exists {
+			delete(semaphore.clients, s)
+		}
+	}
+	releaseRaviSemaphore(s)
+	s.server.semaphoreLock.Unlock()
+}
+
 func handleMsgSysCreateSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	pkt := p.(*mhfpacket.MsgSysCreateSemaphore)
 	doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x03, 0x00, 0x0d})
@@ -15,31 +27,53 @@ func handleMsgSysDeleteSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	pkt := p.(*mhfpacket.MsgSysDeleteSemaphore)
 	sem := pkt.AckHandle
 	if s.server.semaphore != nil {
-	s.server.semaphoreLock.Lock()
-	for id := range s.server.semaphore {
-		switch sem {
+		s.server.semaphoreLock.Lock()
+		for id := range s.server.semaphore {
+			switch sem {
 			case 917533:
 				if s.server.semaphore[id].id_semaphore == "hs_l0u3B51J9k3" {
-					delete(s.server.semaphore["hs_l0u3B51J9k3"].reservedClientSlots, s.charID)
-					delete(s.server.semaphore["hs_l0u3B51J9k3"].clients, s)
+					delete(s.server.semaphore["hs_l0u3B51J9k3"].reservedClientSlots, s.charID)
+					delete(s.server.semaphore["hs_l0u3B51J9k3"].clients, s)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B5129k3" {
+					delete(s.server.semaphore["hs_l0u3B5129k3"].reservedClientSlots, s.charID)
+					delete(s.server.semaphore["hs_l0u3B5129k3"].clients, s)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B512Ak3" {
+					delete(s.server.semaphore["hs_l0u3B512Ak3"].reservedClientSlots, s.charID)
+					delete(s.server.semaphore["hs_l0u3B512Ak3"].clients, s)
 				}
 			case 851997:
 				if s.server.semaphore[id].id_semaphore == "hs_l0u3B51J9k4" {
-					delete(s.server.semaphore["hs_l0u3B51J9k4"].reservedClientSlots, s.charID)
+					delete(s.server.semaphore["hs_l0u3B51J9k4"].reservedClientSlots, s.charID)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B5129k4" {
+					delete(s.server.semaphore["hs_l0u3B5129k4"].reservedClientSlots, s.charID)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B512Ak4" {
+					delete(s.server.semaphore["hs_l0u3B512Ak4"].reservedClientSlots, s.charID)
 				}
 			case 786461:
 				if s.server.semaphore[id].id_semaphore == "hs_l0u3B51J9k5" {
-					delete(s.server.semaphore["hs_l0u3B51J9k5"].reservedClientSlots, s.charID)
+					delete(s.server.semaphore["hs_l0u3B51J9k5"].reservedClientSlots, s.charID)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B5129k5" {
+					delete(s.server.semaphore["hs_l0u3B5129k5"].reservedClientSlots, s.charID)
+				} else if s.server.semaphore[id].id_semaphore == "hs_l0u3B512Ak5" {
+					delete(s.server.semaphore["hs_l0u3B512Ak5"].reservedClientSlots, s.charID)
 				}
 			default:
 				if len(s.server.semaphore[id].reservedClientSlots) != 0 {
-					if s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k3" && s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k4" && s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k5" {
-						delete(s.server.semaphore[id].reservedClientSlots, s.charID)
+					if s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k3" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k4" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B51J9k5" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B5129k3" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B5129k4" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B5129k5" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B512Ak3" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B512Ak4" &&
+						s.server.semaphore[id].id_semaphore != "hs_l0u3B512Ak5" {
+						delete(s.server.semaphore[id].reservedClientSlots, s.charID)
 					}
 				}
 			}
 		}
-	s.server.semaphoreLock.Unlock()
+		s.server.semaphoreLock.Unlock()
 	}
 }
@@ -52,11 +86,7 @@ func handleMsgSysCreateAcquireSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	fmt.Printf("Got reserve stage req, StageID: %v\n\n", SemaphoreID)
 	if !gotNewStage {
 		s.server.semaphoreLock.Lock()
-		if SemaphoreID == "hs_l0u3B51J9k1" ||
-			SemaphoreID == "hs_l0u3B51J9k2" ||
-			SemaphoreID == "hs_l0u3B51J9k3" ||
-			SemaphoreID == "hs_l0u3B51J9k4" ||
-			SemaphoreID == "hs_l0u3B51J9k5" {
+		if strings.HasPrefix(SemaphoreID, "hs_l0u3B51") {
 			s.server.semaphore[SemaphoreID] = NewSemaphore(SemaphoreID, 32)
 		} else {
 			s.server.semaphore[SemaphoreID] = NewSemaphore(SemaphoreID, 1)
@@ -68,24 +98,23 @@ func handleMsgSysCreateAcquireSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	newSemaphore.Lock()
 	defer newSemaphore.Unlock()
 	if _, exists := newSemaphore.reservedClientSlots[s.charID]; exists {
 		s.logger.Info("IS ALREADY EXIST !")
 		doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x0F, 0x00, 0x1D})
 	} else if uint16(len(newSemaphore.reservedClientSlots)) < newSemaphore.maxPlayers {
-		switch SemaphoreID {
-		case "hs_l0u3B51J9k3":
+		switch SemaphoreID {
+		case "hs_l0u3B51J9k3", "hs_l0u3B5129k3", "hs_l0u3B512Ak3":
 			newSemaphore.reservedClientSlots[s.charID] = nil
 			newSemaphore.clients[s] = s.charID
 			s.Lock()
 			s.semaphore = newSemaphore
 			s.Unlock()
 			doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x0E, 0x00, 0x1D})
-		case "hs_l0u3B51J9k4":
+		case "hs_l0u3B51J9k4", "hs_l0u3B5129k4", "hs_l0u3B512Ak4":
 			newSemaphore.reservedClientSlots[s.charID] = nil
 			s.Lock()
 			s.semaphore = newSemaphore
 			s.Unlock()
 			doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x0D, 0x00, 0x1D})
-		case "hs_l0u3B51J9k5":
+		case "hs_l0u3B51J9k5", "hs_l0u3B5129k5", "hs_l0u3B512Ak5":
 			newSemaphore.reservedClientSlots[s.charID] = nil
 			s.Lock()
 			s.semaphore = newSemaphore
@@ -109,29 +138,16 @@ func handleMsgSysAcquireSemaphore(s *Session, p mhfpacket.MHFPacket) {
 func handleMsgSysReleaseSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	//pkt := p.(*mhfpacket.MsgSysReleaseSemaphore)
 	if _, exists := s.server.semaphore["hs_l0u3B51J9k3"]; exists {
 		reset := len(s.server.semaphore["hs_l0u3B51J9k3"].reservedClientSlots)
 		if reset == 0 {
 			s.server.db.Exec("CALL ravireset($1)", 0)
 		}
 	}
 }

-func removeSessionFromSemaphore(s *Session) {
-	s.server.semaphoreLock.Lock()
-	for id := range s.server.semaphore {
-		delete(s.server.semaphore[id].reservedClientSlots, s.charID)
-		if id == "hs_l0u3B51J9k3" {
-			delete(s.server.semaphore[id].clients, s)
-		} else {
-			continue
-		}
-	}
-	s.server.semaphoreLock.Unlock()
-	releaseRaviSemaphore(s)
-}
-
 func handleMsgSysCheckSemaphore(s *Session, p mhfpacket.MHFPacket) {
 	pkt := p.(*mhfpacket.MsgSysCheckSemaphore)
-	doAckSimpleSucceed(s, pkt.AckHandle, make([]byte, 4))
+	resp := []byte{0x00, 0x00, 0x00, 0x00}
+	s.server.semaphoreLock.Lock()
+	if _, exists := s.server.semaphore[pkt.StageID]; exists {
+		resp = []byte{0x00, 0x00, 0x00, 0x01}
+	}
+	s.server.semaphoreLock.Unlock()
+	doAckSimpleSucceed(s, pkt.AckHandle, resp)
 }
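
The per-ID else-if chains in handleMsgSysDeleteSemaphore grow by two entries per Raviente family in this commit. Since every ID involved starts with "hs_l0u3B51" and the family is distinguished by its trailing "k3"/"k4"/"k5", the deletes could collapse into one helper. A possible consolidation, not part of this commit; it reuses the strings import added above, and relies on Go's delete being a no-op for missing keys:

// releaseRaviFamily clears one Raviente semaphore family for a character.
// Caller is assumed to hold s.server.semaphoreLock.
func releaseRaviFamily(s *Session, suffix string) {
	for _, sema := range s.server.semaphore {
		if strings.HasPrefix(sema.id_semaphore, "hs_l0u3B51") && strings.HasSuffix(sema.id_semaphore, suffix) {
			delete(sema.reservedClientSlots, s.charID)
			// Only the "k3" family tracks clients above, but deleting a
			// missing key is harmless, so no special case is needed.
			delete(sema.clients, s)
		}
	}
}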

View File

@@ -3,6 +3,7 @@ package channelserver
 import (
 	"fmt"
 	"time"
+	"strings"

 	"github.com/Solenataris/Erupe/network/mhfpacket"
 	"github.com/Andoryuuta/byteframe"
@@ -11,13 +12,16 @@ import (
 func handleMsgSysCreateStage(s *Session, p mhfpacket.MHFPacket) {
 	pkt := p.(*mhfpacket.MsgSysCreateStage)
-	s.server.stagesLock.Lock()
+	s.server.Lock()
+	defer s.server.Unlock()
 	if _, exists := s.server.stages[pkt.StageID]; exists {
 		doAckSimpleFail(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
 	} else {
 		stage := NewStage(pkt.StageID)
 		stage.maxPlayers = uint16(pkt.PlayerCount)
 		s.server.stages[stage.id] = stage
-		s.server.stagesLock.Unlock()
-		doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
+		doAckSimpleSucceed(s, pkt.AckHandle, []byte{0x00, 0x00, 0x00, 0x00})
 	}
 }

 func handleMsgSysStageDestruct(s *Session, p mhfpacket.MHFPacket) {}
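
Worth calling out in the hunk above: the old code took stagesLock.Lock() on entry but only unlocked on the else path, so the "stage already exists" branch returned with the lock still held. The new defer makes the unlock unconditional. The same defer-on-acquire shape as a standalone sketch (withServerLock is illustrative, not an existing Erupe helper):

// withServerLock runs fn while holding the server-wide lock; the deferred
// Unlock runs on every return path, including early returns and panics.
func withServerLock(s *Session, fn func()) {
	s.server.Lock()
	defer s.server.Unlock()
	fn()
}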
@@ -64,58 +68,65 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
 	// Notify existing stage clients that this new client has entered.
 	s.logger.Info("Sending MsgSysInsertUser")
 	if s.stage != nil { // avoids lock up when using bed for dream quests
-		s.stage.BroadcastMHF(&mhfpacket.MsgSysInsertUser{
+		// Add character to everyone elses stage
+		s.stage.BroadcastMHF(&mhfpacket.MsgSysInsertUser{
 			CharID: s.charID,
 		}, s)
-		// It seems to be acceptable to recast all MSG_SYS_SET_USER_BINARY messages so far,
-		// players are still notified when a new player has joined the stage.
-		// These extra messages may not be needed
-		//s.stage.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
-		//	CharID:     s.charID,
-		//	BinaryType: 1,
-		//}, s)
-		//s.stage.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
-		//	CharID:     s.charID,
-		//	BinaryType: 2,
-		//}, s)
-		//s.stage.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
-		//	CharID:     s.charID,
-		//	BinaryType: 3,
-		//}, s)
+		// Update others binary of your session
+		s.server.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     s.charID,
+			BinaryType: 1,
+		}, s)
+		s.server.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     s.charID,
+			BinaryType: 2,
+		}, s)
+		s.server.BroadcastMHF(&mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     s.charID,
+			BinaryType: 3,
+		}, s)
 	}

-	//Notify the entree client about all of the existing clients in the stage.
+	// Notify the entree client about all of the existing clients in the stage.
 	s.logger.Info("Notifying entree about existing stage clients")
 	s.stage.RLock()
 	clientNotif := byteframe.NewByteFrame()
+	// Get other players in the stage
 	for session := range s.stage.clients {
 		var cur mhfpacket.MHFPacket
-		cur = &mhfpacket.MsgSysInsertUser{
-			CharID: session.charID,
-		}
-		clientNotif.WriteUint16(uint16(cur.Opcode()))
-		cur.Build(clientNotif, session.clientContext)
+		cur = &mhfpacket.MsgSysInsertUser{
+			CharID: session.charID,
+		}
+		clientNotif.WriteUint16(uint16(cur.Opcode()))
+		cur.Build(clientNotif, session.clientContext)
+	}
-		cur = &mhfpacket.MsgSysNotifyUserBinary{
-			CharID:     session.charID,
-			BinaryType: 1,
-		}
-		clientNotif.WriteUint16(uint16(cur.Opcode()))
-		cur.Build(clientNotif, session.clientContext)
+	// Get every players binary
+	for session := range s.server.sessions {
+		var cur mhfpacket.MHFPacket
+		session := s.server.sessions[session]
-		cur = &mhfpacket.MsgSysNotifyUserBinary{
-			CharID:     session.charID,
-			BinaryType: 2,
-		}
-		clientNotif.WriteUint16(uint16(cur.Opcode()))
-		cur.Build(clientNotif, session.clientContext)
+		cur = &mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     session.charID,
+			BinaryType: 1,
+		}
+		clientNotif.WriteUint16(uint16(cur.Opcode()))
+		cur.Build(clientNotif, session.clientContext)
-		cur = &mhfpacket.MsgSysNotifyUserBinary{
-			CharID:     session.charID,
-			BinaryType: 3,
-		}
-		clientNotif.WriteUint16(uint16(cur.Opcode()))
-		cur.Build(clientNotif, session.clientContext)
+		cur = &mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     session.charID,
+			BinaryType: 2,
+		}
+		clientNotif.WriteUint16(uint16(cur.Opcode()))
+		cur.Build(clientNotif, session.clientContext)
+		cur = &mhfpacket.MsgSysNotifyUserBinary{
+			CharID:     session.charID,
+			BinaryType: 3,
+		}
+		clientNotif.WriteUint16(uint16(cur.Opcode()))
+		cur.Build(clientNotif, session.clientContext)
 	}
 	s.stage.RUnlock()
 	clientNotif.WriteUint16(0x0010) // End it.
@@ -143,6 +154,18 @@ func doStageTransfer(s *Session, ackHandle uint32, stageID string) {
 	}
 }

+func removeEmptyStages(s *Session) {
+	s.server.Lock()
+	for sid, stage := range s.server.stages {
+		if len(stage.reservedClientSlots) == 0 && len(stage.clients) == 0 {
+			if strings.HasPrefix(sid, "sl1Qs") || strings.HasPrefix(sid, "sl2Qs") || strings.HasPrefix(sid, "sl3Qs") {
+				delete(s.server.stages, sid)
+			}
+		}
+	}
+	s.server.Unlock()
+}
+
 func removeSessionFromStage(s *Session) {
 	s.stage.Lock()
 	defer s.stage.Unlock()
@@ -151,6 +174,15 @@ func removeSessionFromStage(s *Session) {
 	delete(s.stage.clients, s)
 	delete(s.stage.reservedClientSlots, s.charID)

+	// Remove client from all reservations
+	s.server.Lock()
+	for _, stage := range s.server.stages {
+		if _, exists := stage.reservedClientSlots[s.charID]; exists {
+			delete(stage.reservedClientSlots, s.charID)
+		}
+	}
+	s.server.Unlock()
+
 	// Delete old stage objects owned by the client.
 	s.logger.Info("Sending MsgSysDeleteObject to old stage clients")
 	for objID, stageObject := range s.stage.objects {
@@ -163,14 +195,16 @@ func removeSessionFromStage(s *Session) {
 				// Actually delete it form the objects map.
 				delete(s.stage.objects, objID)
 			}
 		}
 	}

 	for objListID, stageObjectList := range s.stage.objectList {
 		if stageObjectList.charid == s.charID {
 			// Added to prevent duplicates from flooding ObjectMap and causing server hangs
 			s.stage.objectList[objListID].status = false
 			s.stage.objectList[objListID].charid = 0
 		}
 	}
-}
+
+	removeEmptyStages(s)
+}
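
One lock-ordering observation on the hunk above: removeSessionFromStage still holds s.stage.Lock() (via the defer at the top) when it reaches the new removeEmptyStages(s) call, which then takes s.server.Lock(). If any other code path acquires those two locks in the opposite order, the server can deadlock. A sketch of a safer shape, assuming the sweep can move outside the locked region (hypothetical restructuring, not in this commit):

func removeSessionFromStageLocked(s *Session) {
	s.stage.Lock()
	defer s.stage.Unlock()
	// ... per-stage cleanup as in the diff above ...
}

func removeSessionFromStage(s *Session) {
	removeSessionFromStageLocked(s)
	// The server-wide sweep now runs with no stage lock held.
	removeEmptyStages(s)
}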
@@ -252,7 +286,9 @@ func handleMsgSysReserveStage(s *Session, p mhfpacket.MHFPacket) {
 	s.server.stagesLock.Unlock()
 	if !gotStage {
-		s.logger.Fatal("Failed to get stage", zap.String("StageID", stageID))
+		s.logger.Error("Failed to get stage", zap.String("StageID", stageID))
+		doAckSimpleFail(s, pkt.AckHandle, make([]byte, 4))
+		return
 	}

 	// Try to reserve a slot, fail if full.
@@ -427,16 +463,18 @@ func handleMsgSysEnumerateStage(s *Session, p mhfpacket.MHFPacket) {
 	// Build the response
 	resp := byteframe.NewByteFrame()
-	resp.WriteUint16(uint16(len(s.server.stages)))
+	bf := byteframe.NewByteFrame()
+	var joinable int
 	for sid, stage := range s.server.stages {
 		stage.RLock()
 		defer stage.RUnlock()
-		if len(stage.reservedClientSlots)+len(stage.clients) == 0 {
+		if len(stage.reservedClientSlots) == 0 && len(stage.clients) == 0 {
 			continue
 		}
+		joinable++
-		resp.WriteUint16(uint16(len(stage.reservedClientSlots))) // Current players.
-		resp.WriteUint16(0)                                      // Unknown value
+		resp.WriteUint16(uint16(len(stage.reservedClientSlots))) // Reserved players.
+		resp.WriteUint16(0)                                      // Unknown value
 		var hasDeparted uint16
 		if stage.hasDeparted {
@@ -448,6 +486,8 @@ func handleMsgSysEnumerateStage(s *Session, p mhfpacket.MHFPacket) {
 		resp.WriteUint8(uint8(len(sid)))
 		resp.WriteBytes([]byte(sid))
 	}
+	bf.WriteUint16(uint16(joinable))
+	bf.WriteBytes(resp.Data())
-	doAckBufSucceed(s, pkt.AckHandle, resp.Data())
+	doAckBufSucceed(s, pkt.AckHandle, bf.Data())
 }
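
The joinable-count change above uses a second byteframe because writes are append-only: the stage entries are built into resp first, then bf receives the final count followed by the body. The same prefix-count pattern in isolation (a sketch; buildStageList and its framing are illustrative, only the byteframe calls are the real API):

package main

import "github.com/Andoryuuta/byteframe"

func buildStageList(stageIDs []string) []byte {
	// Build the variable-length body first.
	body := byteframe.NewByteFrame()
	for _, sid := range stageIDs {
		body.WriteUint8(uint8(len(sid)))
		body.WriteBytes([]byte(sid))
	}
	// Prepend the count once it is known.
	out := byteframe.NewByteFrame()
	out.WriteUint16(uint16(len(stageIDs)))
	out.WriteBytes(body.Data())
	return out.Data()
}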

View File

@@ -53,6 +53,63 @@ type Server struct {
 	// Discord chat integration
 	discordSession *discordgo.Session
+
+	raviente *Raviente
 }
+
+type Raviente struct {
+	sync.Mutex
+	register *RavienteRegister
+	state    *RavienteState
+	support  *RavienteSupport
+}
+
+type RavienteRegister struct {
+	nextTime     uint32
+	startTime    uint32
+	postTime     uint32
+	killedTime   uint32
+	ravienteType uint32
+	maxPlayers   uint32
+	carveQuest   uint32
+	register     []uint32
+}
+
+type RavienteState struct {
+	damageMultiplier uint32
+	stateData        []uint32
+}
+
+type RavienteSupport struct {
+	supportData []uint32
+}
+
+// Set up the Raviente variables for the server
+func NewRaviente() *Raviente {
+	ravienteRegister := &RavienteRegister{
+		nextTime:     0,
+		startTime:    0,
+		killedTime:   0,
+		postTime:     0,
+		ravienteType: 0,
+		maxPlayers:   0,
+		carveQuest:   0,
+	}
+	ravienteState := &RavienteState{
+		damageMultiplier: 1,
+	}
+	ravienteSupport := &RavienteSupport{}
+	ravienteRegister.register = []uint32{0, 0, 0, 0, 0}
+	ravienteState.stateData = []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+	ravienteSupport.supportData = []uint32{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+	raviente := &Raviente{
+		register: ravienteRegister,
+		state:    ravienteState,
+		support:  ravienteSupport,
+	}
+	return raviente
+}
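
Minor style note on NewRaviente: a freshly allocated []uint32 is already zero-filled in Go, and numeric struct fields default to zero, so the explicit 0 literals are redundant. An equivalent, shorter initialization (a sketch; the make lengths are meant to mirror the literals above):

func NewRaviente() *Raviente {
	return &Raviente{
		register: &RavienteRegister{register: make([]uint32, 5)},
		state: &RavienteState{
			damageMultiplier: 1,
			stateData:        make([]uint32, 29), // same length as the zero literal above
		},
		support: &RavienteSupport{supportData: make([]uint32, 25)},
	}
}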
// NewServer creates a new Server type.