/*
 * Fragment of UV (SGI "Ultraviolet") platform init: discover which
 * blades are present and build the blade / node / cpu lookup tables.
 * Variables such as m_n_config, m_val, n_val, i, j, blade, present,
 * bytes and node_id are declared earlier in the enclosing function
 * (not visible in this chunk).
 */
/* Number of node-id bits in the socket field, read from the M/N
 * config MMR earlier in the function. */
388 n_val = m_n_config.s.n_skt;
/* Base address of the global MMR space, with the enable bit
 * masked off so only the address remains. */
389 mmr_base =
390 uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) &
391 ~UV_MMR_ENABLE;
392 printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base);
393
/*
 * Each 64-bit word of the node-present table is a bitmask of
 * populated nodes; the total popcount gives the blade count.
 */
394 for(i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++)
395 uv_possible_blades +=
396 hweight64(uv_read_local_mmr( UVH_NODE_PRESENT_TABLE + i * 8));
397 printk(KERN_DEBUG "UV: Found %d blades\n", uv_num_possible_blades());
398
/* Per-blade bookkeeping array, one entry per possible blade.
 * NOTE(review): this kmalloc() result -- and the two below -- is not
 * checked; an allocation failure would oops on first dereference in
 * the blade-walk loop. Consider BUG_ON()/error handling. */
399 bytes = sizeof(struct uv_blade_info) * uv_num_possible_blades();
400 uv_blade_info = kmalloc(bytes, GFP_KERNEL);
401
402 get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size);
403
/* node -> blade translation table; filled with 0xff bytes,
 * presumably an "unassigned" sentinel -- verify against readers. */
404 bytes = sizeof(uv_node_to_blade[0]) * num_possible_nodes();
405 uv_node_to_blade = kmalloc(bytes, GFP_KERNEL);
406 memset(uv_node_to_blade, 255, bytes);
407
/* cpu -> blade translation table, same 0xff fill. */
408 bytes = sizeof(uv_cpu_to_blade[0]) * num_possible_cpus();
409 uv_cpu_to_blade = kmalloc(bytes, GFP_KERNEL);
410 memset(uv_cpu_to_blade, 255, bytes);
411
/*
 * Walk the node-present bitmasks a second time, assigning
 * consecutive blade numbers and recording each blade's physical
 * node id (pnode = word index * 64 + bit index).
 */
412 blade = 0;
413 for (i = 0; i < UVH_NODE_PRESENT_TABLE_DEPTH; i++) {
414 present = uv_read_local_mmr(UVH_NODE_PRESENT_TABLE + i * 8);
415 for (j = 0; j < 64; j++) {
416 if (!test_bit(j, &present))
417 continue;
418 uv_blade_info[blade].pnode = (i * 64 + j);
419 uv_blade_info[blade].nr_possible_cpus = 0;
420 uv_blade_info[blade].nr_online_cpus = 0;
421 blade++;
422 }
423 }
424
/*
 * Keep only the bits of this node's id above the low n_val socket
 * bits, shifted left by m_val -- the "upper" part used when forming
 * global node addresses later in the function.
 */
425 node_id.v = uv_read_local_mmr(UVH_NODE_ID);
426 gnode_upper = (((unsigned long)node_id.s.node_id) &
427 ~((1 << n_val) - 1)) << m_val;
428