3152 } else {
/*
 * The iclog is still in use: take an extra reference, switch
 * iclogs via xlog_state_switch_iclogs(), then release our
 * reference.  l_icloglock must be dropped across
 * xlog_state_release_iclog(); a non-zero return from it means
 * the log has gone bad, so we bail out with EIO.
 */
3153 atomic_inc(&iclog->ic_refcnt);
3154 xlog_state_switch_iclogs(log, iclog, 0);
3155 spin_unlock(&log->l_icloglock);
3156 if (xlog_state_release_iclog(log, iclog))
3157 return XFS_ERROR(EIO);
3158 *log_flushed = 1;
3159 spin_lock(&log->l_icloglock);
3160 }
3161 }
3162
/*
 * Synchronous force requested and this iclog has not yet reached
 * ACTIVE or DIRTY (its I/O is still outstanding): sleep until the
 * force completes.  Otherwise just drop the lock and return.
 */
3163 if ((flags & XFS_LOG_SYNC) &&
3164 !(iclog->ic_state & (XLOG_STATE_ACTIVE | XLOG_STATE_DIRTY))) {
3165 /*
3166  * Check for a shutdown log before we sleep, while we still
3167  * hold l_icloglock, so we never wait on an iclog that will
3168  * not be woken normally.
3169  */
3170 if (iclog->ic_state & XLOG_STATE_IOERROR) {
3171 spin_unlock(&log->l_icloglock);
3172 return XFS_ERROR(EIO);
3173 }
3174 XFS_STATS_INC(xs_log_force_sleep);
3175 sv_wait(&iclog->ic_force_wait, PSWP, &log->l_icloglock, s);
3176 /*
3177  * Re-check ic_state after waking, in case the log was shut
3178  * down while we slept.  No lock is retaken here — the return
3179  * paths below do not unlock, so sv_wait() presumably dropped
3180  * l_icloglock for us (NOTE(review): confirm sv_wait semantics).
3181  */
3181 if (iclog->ic_state & XLOG_STATE_IOERROR)
3182 return XFS_ERROR(EIO);
3183 *log_flushed = 1;
3184 } else {
3185 spin_unlock(&log->l_icloglock);
3186 }
3187 return 0;
3188
3189 } while (iclog != log->l_iclog);
3190
/* Scanned every iclog without finding one to force: nothing to do. */
3191 spin_unlock(&log->l_icloglock);
3192 return 0;