	fdtab[fd].owner = owner;
	fdtab[fd].iocb = iocb;
	fdtab[fd].ev = 0;
-	fdtab[fd].update_mask &= ~tid_bit;
	fdtab[fd].linger_risk = 0;
	fdtab[fd].cloned = 0;
	fdtab[fd].thread_mask = thread_mask;
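
The line dropped above is the plain, non-atomic clear of the calling thread's bit in update_mask; the poller hunks below keep that job but do it with HA_ATOMIC_AND(), which as far as I can tell is HAProxy's wrapper around an atomic AND. Below is a minimal, self-contained C11 sketch of why the atomic form matters once several threads touch the same mask; update_mask and tid_bit only mirror the identifiers in the patch, none of this is HAProxy code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic unsigned long update_mask;   /* one bit per thread, like fdtab[fd].update_mask */

static void *worker(void *arg)
{
	unsigned long tid_bit = 1UL << (long)(uintptr_t)arg;

	/* schedule an "update" for ourselves: set our bit (think HA_ATOMIC_OR) */
	atomic_fetch_or(&update_mask, tid_bit);

	/* release it again: the equivalent of HA_ATOMIC_AND(&update_mask, ~tid_bit).
	 * A plain "update_mask &= ~tid_bit" would be a non-atomic read-modify-write
	 * and could wipe out a bit another thread set in the meantime.
	 */
	atomic_fetch_and(&update_mask, ~tid_bit);
	return NULL;
}

int main(void)
{
	pthread_t th[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&th[i], NULL, worker, (void *)(uintptr_t)i);
	for (long i = 0; i < 4; i++)
		pthread_join(th[i], NULL);

	/* every bit that was set was also atomically cleared, so this prints 0 */
	printf("update_mask = %#lx\n", (unsigned long)update_mask);
	return 0;
}
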
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];
+		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		en = fdtab[fd].state;
-		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (fdtab[fd].polled_mask & tid_bit) {
			if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
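
In this and the following poller hunks, the visible effect of the move is that the thread's update_mask bit is now released before the owner test, so it no longer stays stuck when the fd was closed before the poller processed its update list; that is presumably also why the non-atomic clear in the insertion path (first hunk) could go away. Here is a self-contained, single-threaded sketch of that difference, using simplified stand-ins for fdtab/fd_updt and plain operators where the real code uses HA_ATOMIC_AND; it is an illustration, not HAProxy code.

#include <stdio.h>

struct fake_fd {
	void *owner;                 /* NULL once the fd has been closed/released */
	unsigned long update_mask;   /* one bit per thread */
};

static struct fake_fd fdtab[4];
static int fd_updt[4], fd_nbupdt;

/* walk the update list the way the pollers do, with either ordering */
static void process_updates(unsigned long tid_bit, int clear_before_owner_check)
{
	for (int updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		int fd = fd_updt[updt_idx];

		if (clear_before_owner_check)
			fdtab[fd].update_mask &= ~tid_bit;  /* new ordering: always release the bit */

		if (!fdtab[fd].owner)
			continue;                           /* closed fd: old ordering bails out here */

		if (!clear_before_owner_check)
			fdtab[fd].update_mask &= ~tid_bit;  /* old ordering: only reached with an owner */

		/* ... polling-state changes would be applied here ... */
	}
}

int main(void)
{
	const unsigned long tid_bit = 0x1;

	/* fd 3 was queued for an update, then closed before the poller ran */
	fdtab[3].owner = NULL;
	fdtab[3].update_mask = tid_bit;
	fd_updt[fd_nbupdt++] = 3;

	process_updates(tid_bit, 0);
	printf("old ordering: update_mask = %#lx (bit left behind)\n", fdtab[3].update_mask);

	fdtab[3].update_mask = tid_bit;
	process_updates(tid_bit, 1);
	printf("new ordering: update_mask = %#lx (bit released)\n", fdtab[3].update_mask);
	return 0;
}
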
	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];
+		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		en = fdtab[fd].state;
-		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!(fdtab[fd].thread_mask & tid_bit) || !(en & FD_EV_POLLED_RW)) {
			if (!(fdtab[fd].polled_mask & tid_bit)) {

	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];
+		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		en = fdtab[fd].state;
-		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		/* we have a single state for all threads, which is why we
		 * don't check the tid_bit. First thread to see the update

	for (updt_idx = 0; updt_idx < fd_nbupdt; updt_idx++) {
		fd = fd_updt[updt_idx];
+		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		if (!fdtab[fd].owner) {
			activity[tid].poll_drop++;
			continue;
		}
		en = fdtab[fd].state;
-		HA_ATOMIC_AND(&fdtab[fd].update_mask, ~tid_bit);
		/* we have a single state for all threads, which is why we
		 * don't check the tid_bit. First thread to see the update