Function buf_page_init_for_read

/********************************************************************//**
Function which inits a page for read to the buffer buf_pool. If the page is
(1) already in buf_pool, or
(2) if we specify to read only ibuf pages and the page is not an ibuf page, or
(3) if the space is deleted or being deleted,
then this function does nothing.
Sets the io_fix flag to BUF_IO_READ and sets a non-recursive exclusive lock
on the buffer frame. The io-handler must take care that the flag is cleared
and the lock released later.
@return    pointer to the block or NULL */
UNIV_INTERN
buf_page_t*
buf_page_init_for_read(
/*===================*/
    ulint*        err,    /*!< out: DB_SUCCESS or DB_TABLESPACE_DELETED */
    ulint        mode,    /*!< in: BUF_READ_IBUF_PAGES_ONLY, ... */
    ulint        space,    /*!< in: space id */
    ulint        zip_size,/*!< in: compressed page size, or 0 */
    ibool        unzip,    /*!< in: TRUE=request uncompressed page */
    ib_int64_t    tablespace_version,
                /*!< in: prevents reading from a wrong
                version of the tablespace in case we have done
                DISCARD + IMPORT */
    ulint        offset)    /*!< in: page number */
{
    buf_block_t*    block;
    buf_page_t*    bpage    = NULL;
    buf_page_t*    watch_page;
    mtr_t        mtr;
    ulint        fold;
    ibool        lru    = FALSE;
    void*        data;
    buf_pool_t*    buf_pool = buf_pool_get(space, offset);

    ut_ad(buf_pool);

    *err = DB_SUCCESS;

    if (mode == BUF_READ_IBUF_PAGES_ONLY) {
        /* It is a read-ahead within an ibuf routine */

        ut_ad(!ibuf_bitmap_page(zip_size, offset));

        ibuf_mtr_start(&mtr);

        if (!recv_no_ibuf_operations
            && !ibuf_page(space, zip_size, offset, &mtr)) {

            ibuf_mtr_commit(&mtr);

            return(NULL);
        }
    } else {
        ut_ad(mode == BUF_READ_ANY_PAGE);
    }

    if (zip_size && UNIV_LIKELY(!unzip)
        && UNIV_LIKELY(!recv_recovery_is_on())) {
        block = NULL;
    } else {
        /* Normally we reach this branch: zip_size is 0, i.e. an
        uncompressed page. */

        /* Get a free block, trying the free list first, then the
        unzip_LRU list, then the common LRU list. */
        block = buf_LRU_get_free_block(buf_pool);
        ut_ad(block);
        ut_ad(buf_pool_from_block(block) == buf_pool);
    }

    fold = buf_page_address_fold(space, offset);

    buf_pool_mutex_enter(buf_pool);

    watch_page = buf_page_hash_get_low(buf_pool, space, offset, fold);
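    /* Note (assumption based on the sentinel checks in this function):
    the page_hash lookup can return either a real page descriptor or a
    "watch" sentinel registered via buf_pool_watch_set(); only a
    non-sentinel entry means the page is actually resident, which is
    why both conditions are tested below. */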
    if (watch_page && !buf_pool_watch_is_sentinel(buf_pool, watch_page)) {
        /* The page is already in the buffer pool. */
        watch_page = NULL;
err_exit:
        if (block) {
            mutex_enter(&block->mutex);
            buf_LRU_block_free_non_file_page(block);
            mutex_exit(&block->mutex);
        }

        bpage = NULL;
        goto func_exit;
    }

    if (fil_tablespace_deleted_or_being_deleted_in_mem(
            space, tablespace_version)) {
        /* The page belongs to a space which has been
        deleted or is being deleted. */
        *err = DB_TABLESPACE_DELETED;

        goto err_exit;
    }

    if (block) {
        bpage = &block->page;
        mutex_enter(&block->mutex);

        ut_ad(buf_pool_from_bpage(bpage) == buf_pool);

        buf_page_init(buf_pool, space, offset, fold, zip_size, block);

        /* The block must be put to the LRU list, to the old blocks */
        buf_LRU_add_block(bpage, TRUE/* to old blocks */);

        /* We set a pass-type x-lock on the frame because then
        the same thread which called for the read operation
        (and is running now at this point of code) can wait
        for the read to complete by waiting for the x-lock on
        the frame; if the x-lock were recursive, the same
        thread would illegally get the x-lock before the page
        read is completed.  The x-lock is cleared by the
        io-handler thread. */

        rw_lock_x_lock_gen(&block->lock, BUF_IO_READ);
        /* Set bpage->io_fix to BUF_IO_READ, indicating that the page
        is about to be read in from disk. */
        buf_page_set_io_fix(bpage, BUF_IO_READ);

        if (UNIV_UNLIKELY(zip_size)) {
            /* buf_pool->mutex may be released and
            reacquired by buf_buddy_alloc().  Thus, we
            must release block->mutex in order not to
            break the latching order in the reacquisition
            of buf_pool->mutex.  We also must defer this
            operation until after the block descriptor has
            been added to buf_pool->LRU and
            buf_pool->page_hash. */
            mutex_exit(&block->mutex);
            data = buf_buddy_alloc(buf_pool, zip_size, &lru);
            mutex_enter(&block->mutex);
            block->page.zip.data = data;

            /* To maintain the invariant
            block->in_unzip_LRU_list
            == buf_page_belongs_to_unzip_LRU(&block->page)
            we have to add this block to unzip_LRU
            after block->page.zip.data is set. */
            ut_ad(buf_page_belongs_to_unzip_LRU(&block->page));
            buf_unzip_LRU_add_block(block, TRUE);
        }

        mutex_exit(&block->mutex);
    } else {
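        /* No uncompressed frame is allocated on this path: the page
        will live in the pool as BUF_BLOCK_ZIP_PAGE, i.e. only a
        compressed frame from the buddy allocator plus a lightweight
        buf_page_t descriptor. */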
        /* The compressed page must be allocated before the
        control block (bpage), in order to avoid the
        invocation of buf_buddy_relocate_block() on
        uninitialized data. */
        data = buf_buddy_alloc(buf_pool, zip_size, &lru);

        /* If buf_buddy_alloc() allocated storage from the LRU list,
        it released and reacquired buf_pool->mutex.  Thus, we must
        check the page_hash again, as it may have been modified. */
        if (UNIV_UNLIKELY(lru)) {

            watch_page = buf_page_hash_get_low(
                buf_pool, space, offset, fold);

            if (watch_page
                && !buf_pool_watch_is_sentinel(buf_pool,
                                  watch_page)) {

                /* The block was added by some other thread. */
                watch_page = NULL;
                buf_buddy_free(buf_pool, data, zip_size);

                bpage = NULL;
                goto func_exit;
            }
        }

        bpage = buf_page_alloc_descriptor();

        /* Initialize the buf_pool pointer. */
        bpage->buf_pool_index = buf_pool_index(buf_pool);

        page_zip_des_init(&bpage->zip);
        page_zip_set_size(&bpage->zip, zip_size);
        bpage->zip.data = data;

        mutex_enter(&buf_pool->zip_mutex);
        UNIV_MEM_DESC(bpage->zip.data,
                  page_zip_get_size(&bpage->zip), bpage);

        buf_page_init_low(bpage);

        bpage->state    = BUF_BLOCK_ZIP_PAGE;
        bpage->space    = space;
        bpage->offset    = offset;

#ifdef UNIV_DEBUG
        bpage->in_page_hash = FALSE;
        bpage->in_zip_hash = FALSE;
        bpage->in_flush_list = FALSE;
        bpage->in_free_list = FALSE;
        bpage->in_LRU_list = FALSE;
#endif /* UNIV_DEBUG */

        ut_d(bpage->in_page_hash = TRUE);

        if (UNIV_LIKELY_NULL(watch_page)) {
            /* Preserve the reference count. */
            ulint    buf_fix_count = watch_page->buf_fix_count;
            ut_a(buf_fix_count > 0);
            bpage->buf_fix_count += buf_fix_count;
            ut_ad(buf_pool_watch_is_sentinel(buf_pool, watch_page));
            buf_pool_watch_remove(buf_pool, fold, watch_page);
        }

        HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold,
                bpage);

        /* The block must be put to the LRU list, to the old blocks
        The zip_size is already set into the page zip */
        buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
        buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */

        buf_page_set_io_fix(bpage, BUF_IO_READ);

        mutex_exit(&buf_pool->zip_mutex);
    }

    buf_pool->n_pend_reads++;
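    /* n_pend_reads is decremented again in buf_page_io_complete()
    once the read finishes. */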
func_exit:
    buf_pool_mutex_exit(buf_pool);

    if (mode == BUF_READ_IBUF_PAGES_ONLY) {

        ibuf_mtr_commit(&mtr);
    }

    ut_ad(!bpage || buf_page_in_file(bpage));
    return(bpage);
}
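
For context, here is a condensed sketch of how the read path drives buf_page_init_for_read(). It loosely follows the caller buf_read_page_low() in buf0rea.c, assumes the caller's mode, space, zip_size, unzip, tablespace_version, offset and sync variables are in scope, and simplifies the fil_io() flags and error handling: the file read is only issued when a descriptor is returned, and buf_page_io_complete() (called directly here for a synchronous read, otherwise by the i/o handler thread) clears the BUF_IO_READ fix and releases the x-lock set above.

    ulint        err;
    buf_page_t*  bpage;

    bpage = buf_page_init_for_read(&err, mode, space, zip_size, unzip,
                                   tablespace_version, offset);
    if (bpage == NULL) {
        /* Nothing to do: the page is already in the pool, it is not an
        ibuf page in BUF_READ_IBUF_PAGES_ONLY mode, or the tablespace
        was dropped (err == DB_TABLESPACE_DELETED). */
        return(0);
    }

    if (zip_size) {
        /* Read the compressed frame straight into zip.data. */
        err = fil_io(OS_FILE_READ, sync, space, zip_size, offset,
                     0, zip_size, bpage->zip.data, bpage);
    } else {
        err = fil_io(OS_FILE_READ, sync, space, 0, offset,
                     0, UNIV_PAGE_SIZE, ((buf_block_t*) bpage)->frame,
                     bpage);
    }

    if (sync) {
        /* Synchronous read: complete the i/o here, which clears the
        BUF_IO_READ fix and releases the x-lock taken above. */
        buf_page_io_complete(bpage);
    }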